"""Nezha model configuration."""
from ...configuration_utils import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Nezha model."""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
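# Added usage sketch (not from the original file): instantiating the config above through
# the public `transformers` API; it assumes a transformers install that ships the Nezha model.
#
#     from transformers import NezhaConfig, NezhaModel
#
#     config = NezhaConfig(hidden_size=512, num_hidden_layers=6)
#     model = NezhaModel(config)  # randomly initialized, smaller than the defaults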
"""DPT lazy import structure."""
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_dpt'] = [
        'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DPTForDepthEstimation',
        'DPTForSemanticSegmentation',
        'DPTModel',
        'DPTPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
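# Added usage note (not from the original file): the `_LazyModule` registered above defers
# submodule imports until first attribute access, so a user-facing import like the sketch
# below only loads `modeling_dpt` when the class is actually requested (assumes torch and
# vision extras are installed):
#
#     from transformers import DPTConfig, DPTForDepthEstimation
#
#     model = DPTForDepthEstimation(DPTConfig())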
"""simple docstring"""
from __future__ import annotations
from typing import Any
def UpperCAmelCase ( a_ ):
'''simple docstring'''
if not postfix_notation:
return 0
lowerCamelCase : int = {'+', '-', '*', '/'}
lowerCamelCase : list[Any] = []
for token in postfix_notation:
if token in operations:
lowerCamelCase , lowerCamelCase : Optional[int] = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(a_ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
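# Added usage sketch (not from the original file). Note that division truncates toward
# zero rather than flooring, matching C-style integer division.
if __name__ == "__main__":
    print(evaluate_postfix(["5", "1", "2", "+", "4", "*", "+", "3", "-"]))  # (5 + (1 + 2) * 4) - 3 = 14
    print(evaluate_postfix(["-7", "2", "/"]))  # -3, truncated toward zero (floor division would give -4)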
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowercase ( __UpperCAmelCase ):
lowercase_ = ['image_processor', 'tokenizer']
lowercase_ = 'ChineseCLIPImageProcessor'
lowercase_ = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ ) -> List[str]:
lowerCamelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCAmelCase_ , )
lowerCamelCase : int = kwargs.pop('feature_extractor' )
lowerCamelCase : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase : Tuple = self.image_processor
def __call__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ ) -> str:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowerCamelCase : Any = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if images is not None:
lowerCamelCase : Dict = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if text is not None and images is not None:
lowerCamelCase : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )
def _UpperCamelCase ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> List[str]:
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _UpperCamelCase ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> Optional[int]:
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def _UpperCamelCase ( self ) -> Optional[Any]:
lowerCamelCase : List[Any] = self.tokenizer.model_input_names
lowerCamelCase : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCamelCase ( self ) -> List[str]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase_ , )
return self.image_processor_class
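# Added usage sketch (not from the original file); the checkpoint name is an assumption
# and fetching it requires network access.
#
#     from PIL import Image
#     from transformers import ChineseCLIPProcessor
#
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一只猫"], images=Image.open("cat.png"), return_tensors="pt")
#     # inputs combines tokenizer outputs (input_ids, attention_mask, ...) with pixel_values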
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a_ = logging.get_logger(__name__)
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ["""input_values""", """padding_mask"""]
def __init__( self , __lowerCamelCase = 1 , __lowerCamelCase = 2_4000 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = None , **__lowerCamelCase , ):
'''simple docstring'''
super().__init__(feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , **__lowerCamelCase )
__A : Dict = chunk_length_s
__A : int = overlap
@property
def UpperCamelCase__( self ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCamelCase__( self ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = None , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
__A : List[str] = True
__A : Union[str, Any] = bool(
isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
__A : Optional[int] = [np.asarray(__lowerCamelCase , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray ):
__A : str = np.asarray(__lowerCamelCase , dtype=np.floataa )
elif isinstance(__lowerCamelCase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
__A : List[str] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
__A : List[str] = [np.asarray(__lowerCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__lowerCamelCase ):
if example.ndim > 2:
raise ValueError(F"""Expected input shape (channels, length) but got shape {example.shape}""" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F"""Expected mono audio but example has {example.shape[-1]} channels""" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F"""Expected stereo audio but example has {example.shape[-1]} channels""" )
__A : Union[str, Any] = None
__A : int = BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__A : List[Any] = min(array.shape[0] for array in raw_audio )
__A : Optional[Any] = int(np.floor(max_length / self.chunk_stride ) )
__A : str = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__A : Tuple = max(array.shape[0] for array in raw_audio )
__A : int = int(np.ceil(max_length / self.chunk_stride ) )
__A : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length
__A : Dict = '''max_length'''
else:
__A : int = input_values
# normal padding on batch
if padded_inputs is None:
__A : List[str] = self.pad(
__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase , padding=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
if padding:
__A : Union[str, Any] = padded_inputs.pop('''attention_mask''' )
__A : Dict = []
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
__A : Union[str, Any] = example[..., None]
input_values.append(example.T )
__A : Tuple = input_values
if return_tensors is not None:
__A : List[Any] = padded_inputs.convert_to_tensors(__lowerCamelCase )
return padded_inputs
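# Added usage sketch (not from the original file): one second of random mono audio through
# the extractor. The chunking parameters are illustrative; chunking only applies when both
# `chunk_length_s` and `overlap` are set.
#
#     import numpy as np
#     from transformers import EncodecFeatureExtractor
#
#     fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000, chunk_length_s=1.0, overlap=0.01)
#     audio = np.random.randn(24000).astype(np.float32)
#     out = fe(audio, sampling_rate=24000, return_tensors="np")
#     print(out["input_values"].shape)  # (batch, channels, padded_length)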
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __snake_case :
"""simple docstring"""
_lowerCamelCase = 42
# setable values
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = None
@classmethod
def UpperCamelCase__( cls , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
return cls(common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase )
@dataclass
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
_lowerCamelCase = 42
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return True
@register_to_config
def __init__( self , __lowerCamelCase = 1000 , __lowerCamelCase = 0.0_0_0_1 , __lowerCamelCase = 0.0_2 , __lowerCamelCase = "linear" , __lowerCamelCase = None , __lowerCamelCase = "fixed_small" , __lowerCamelCase = True , __lowerCamelCase = "epsilon" , __lowerCamelCase = jnp.floataa , ):
'''simple docstring'''
__A : Tuple = dtype
def UpperCamelCase__( self , __lowerCamelCase = None ):
'''simple docstring'''
if common is None:
__A : Tuple = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__A : Tuple = jnp.array(1.0 , dtype=self.dtype )
__A : Optional[int] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase , )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
return sample
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = () ):
'''simple docstring'''
__A : Optional[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__A : Optional[Any] = (jnp.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__lowerCamelCase , timesteps=__lowerCamelCase , )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None ):
'''simple docstring'''
__A : int = state.common.alphas_cumprod[t]
__A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__A : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__A : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__A : List[Any] = jnp.clip(__lowerCamelCase , a_min=1e-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__A : Optional[Any] = jnp.log(jnp.clip(__lowerCamelCase , a_min=1e-2_0 ) )
elif variance_type == "fixed_large":
__A : Tuple = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__A : Union[str, Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__A : Optional[Any] = variance
__A : Optional[Any] = state.common.betas[t]
__A : Any = (predicted_variance + 1) / 2
__A : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True , ):
'''simple docstring'''
__A : Optional[int] = timestep
if key is None:
__A : List[Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__A , __A : Tuple = jnp.split(__lowerCamelCase , sample.shape[1] , axis=1 )
else:
__A : List[str] = None
# 1. compute alphas, betas
__A : Dict = state.common.alphas_cumprod[t]
__A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__A : Tuple = 1 - alpha_prod_t
__A : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__A : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__A : Any = model_output
elif self.config.prediction_type == "v_prediction":
__A : str = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__A : str = jnp.clip(__lowerCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__A : Optional[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__A : Union[str, Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__A : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__A : List[Any] = jax.random.split(__lowerCamelCase , num=1 )
__A : List[str] = jax.random.normal(__lowerCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__lowerCamelCase , __lowerCamelCase , predicted_variance=__lowerCamelCase ) ** 0.5) * noise
__A : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__A : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__lowerCamelCase , state=__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
'''simple docstring'''
return add_noise_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
'''simple docstring'''
return get_velocity_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
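# Added usage sketch (not from the original file). The Flax scheduler is stateless: all
# mutable values live in `DDPMSchedulerState` and are threaded through each call. Shapes
# and the identity "model" below are placeholders.
#
#     import jax
#     from diffusers import FlaxDDPMScheduler
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, 50)
#     sample = jax.random.normal(jax.random.PRNGKey(0), (1, 3, 32, 32))
#     for t in state.timesteps:
#         model_output = sample  # stand-in for a UNet forward pass
#         sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)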
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : str = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
UpperCAmelCase : List[Any] = ['''accelerate''', '''launch''']
UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
UpperCAmelCase : Union[str, Any] = '''default_config.yaml'''
UpperCAmelCase : Union[str, Any] = config_folder / config_file
UpperCAmelCase : Union[str, Any] = config_folder / '''_default_config.yaml'''
UpperCAmelCase : List[Any] = Path('''tests/test_configs''' )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCAmelCase_ ( cls : Tuple ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCAmelCase_ ( self : List[Any] ):
_A = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCAmelCase_ ( self : Optional[int] ):
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=_UpperCAmelCase ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(_UpperCAmelCase ), self.test_file_path] , env=os.environ.copy() )
def lowerCAmelCase_ ( self : Any ):
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Dict = '''test-tpu'''
UpperCAmelCase : Optional[int] = '''us-central1-a'''
UpperCAmelCase : List[str] = '''ls'''
UpperCAmelCase : str = ['''accelerate''', '''tpu-config''']
UpperCAmelCase : Optional[Any] = '''cd /usr/share'''
UpperCAmelCase : Optional[Any] = '''tests/test_samples/test_command_file.sh'''
UpperCAmelCase : str = '''Running gcloud compute tpus tpu-vm ssh'''
def lowerCAmelCase_ ( self : Any ):
_A = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : Dict ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_UpperCAmelCase )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : str ):
_A = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : List[str] ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : str ):
_A = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : List[Any] ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : int ):
_A = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
a = logging.get_logger(__name__)
a = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Tuple=None , *_UpperCAmelCase : int , **_UpperCAmelCase : Tuple ):
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
if config is None:
assert isinstance(self.model , _UpperCAmelCase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
_A = self.model.config
else:
_A = config
_A = data_args
_A = self.config.tgt_vocab_size if isinstance(self.config , _UpperCAmelCase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
' padding..' )
if self.args.label_smoothing == 0:
_A = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_A = label_smoothed_nll_loss
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : int ):
if self.optimizer is None:
_A = ['bias', 'LayerNorm.weight']
_A = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
_A = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_A = Adafactor
_A = {'scale_parameter': False, 'relative_step': False}
else:
_A = AdamW
_A = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
_A = self.args.learning_rate
if self.sharded_ddp:
_A = OSS(
params=_UpperCAmelCase , optim=_UpperCAmelCase , **_UpperCAmelCase , )
else:
_A = optimizer_cls(_UpperCAmelCase , **_UpperCAmelCase )
if self.lr_scheduler is None:
_A = self._get_lr_scheduler(_UpperCAmelCase )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Any ):
_A = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_A = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_A = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_A = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_UpperCAmelCase )
return scheduler
def lowerCAmelCase_ ( self : Any ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_A = model(**_UpperCAmelCase , use_cache=_UpperCAmelCase )[0]
_A = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_A , _A = model(**_UpperCAmelCase , labels=_UpperCAmelCase , use_cache=_UpperCAmelCase )[:2]
else:
# compute label smoothed loss
_A = model(**_UpperCAmelCase , use_cache=_UpperCAmelCase )[0]
_A = torch.nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )
_A , _A = self.loss_fn(_UpperCAmelCase , _UpperCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] ):
_A = inputs.pop('labels' )
_A , _A = self._compute_loss(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return loss
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : Dict[str, Union[torch.Tensor, Any]] , _UpperCAmelCase : bool , _UpperCAmelCase : Optional[List[str]] = None , ):
_A = self._prepare_inputs(_UpperCAmelCase )
_A = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_A = self.model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **_UpperCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_A = self._pad_tensors_to_max_len(_UpperCAmelCase , gen_kwargs['max_length'] )
_A = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
_A , _A = self._compute_loss(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_A = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_A = self._pad_tensors_to_max_len(_UpperCAmelCase , gen_kwargs['max_length'] )
return (loss, logits, labels)
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple ):
# If PAD token is not defined at least EOS token has to be defined
_A = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F''' padded to `max_length`={max_length}''' )
_A = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_A = tensor
return padded_tensor
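# Added usage sketch (not from the original file): how the legacy seq2seq example scripts
# wire this trainer up, schematically (the argument objects come from the example's own
# CLI parsing and are assumptions here).
#
#     trainer = Seq2SeqTrainer(
#         model=model,
#         args=training_args,          # includes label_smoothing, lr_scheduler, sortish_sampler, ...
#         data_args=data_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         data_collator=data_collator,
#     )
#     trainer.train()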
"""SentencePiece-based tokenizer class for Reformer."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/reformer-crime-and-punishment': 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        eos_token='</s>',
        unk_token='<unk>',
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token) -> int:
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int) -> str:
        if index < self.sp_model.get_piece_size():
            return self.sp_model.IdToPiece(index)
        raise IndexError(f"index {index} is out of range for the SentencePiece vocabulary")

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
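# Added usage sketch (not from the original file): round-tripping text through the
# SentencePiece model; downloads the pretrained vocab on first use.
#
#     from transformers import ReformerTokenizer
#
#     tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tok.encode("Hello, world!")
#     print(tok.convert_ids_to_tokens(ids))
#     print(tok.decode(ids))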
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=UpperCAmelCase__, )
assert hasattr(self, "env" )
def _lowercase ( self : str, UpperCAmelCase__ : List[Any] ):
# configuration for running training on smdistributed Model Parallel
__lowercase = {
"enabled": True,
"processes_per_host": 8,
}
__lowercase = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
__lowercase = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
__lowercase = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""", instance_count=UpperCAmelCase__, instance_type=self.instance_type, debugger_hook_config=UpperCAmelCase__, hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 5_0_0,
}, metric_definitions=self.env.metric_definitions, distribution=UpperCAmelCase__, py_version="py36", )
def _lowercase ( self : Tuple, UpperCAmelCase__ : int ):
TrainingJobAnalytics(UpperCAmelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _lowercase ( self : str, UpperCAmelCase__ : Union[str, Any] ):
# create estimator
__lowercase = self.create_estimator(UpperCAmelCase__ )
# run training
estimator.fit()
# result dataframe
__lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
__lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowercase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""", "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, UpperCAmelCase__ )
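# Added usage note (not from the original file): these tests are opt-in and need AWS
# credentials plus the `sm_env` fixture; the invocation below is an assumption about the
# test layout, not taken from this file.
#
#     TEST_SAGEMAKER=True python -m pytest tests/sagemaker -k "model_parallel"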
"""Validate a Spanish national ID (NIF): 8 digits followed by a check letter."""
NUMBERS_PLUS_LETTER = 'Input must be a string of 8 numbers plus letter'
LOOKUP_LETTERS = 'TRWAGMYFPDXBNJZSQVHLCKE'


def is_spain_national_id(spanish_id: str) -> bool:
    """
    >>> is_spain_national_id("12345678Z")
    True
    >>> is_spain_national_id("12345678z")  # case-insensitive
    True
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace('-', '').upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
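# Added usage sketch (not from the original file): the check letter is just the 8-digit
# number modulo 23 indexed into LOOKUP_LETTERS, so a valid ID can be built for any number.
if __name__ == "__main__":
    number = 12345678
    candidate = f"{number:08d}{LOOKUP_LETTERS[number % 23]}"
    print(candidate)                        # 12345678Z
    print(is_spain_national_id(candidate))  # True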
"""Tests for the Donut image processor."""
import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            'do_resize': self.do_resize,
            'size': self.size,
            'do_thumbnail': self.do_thumbnail,
            'do_align_long_axis': self.do_align_axis,
            'do_pad': self.do_pad,
            'do_normalize': self.do_normalize,
            'image_mean': self.image_mean,
            'image_std': self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
"""Ternary search over a sorted list, iterative and recursive variants."""
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search in the range [left, right). Returns -1 if not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> tuple:
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
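# Added usage sketch (not from the original file): for L = 10 H and C = 5 F the resonant
# frequency is 1 / (2 * pi * sqrt(50)) ≈ 0.0225 Hz.
if __name__ == "__main__":
    label, freq = resonant_frequency(inductance=10, capacitance=5)
    print(f"{label}: {freq:.4f} Hz")  # Resonant frequency: 0.0225 Hz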
"""Raise a base to a power using recursion."""


def power(base: int, exponent: int) -> float:
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
"""Top-k mask filling with CamemBERT."""
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('<mask>') == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = ' '.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')):
        predicted_token = predicted_token_bpe.replace('\u2581', ' ')
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(' {0}'.format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
model = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()

masked_input = 'Le camembert est <mask> :)'
print(fill_mask(masked_input, model, tokenizer, topk=3))
"""Greatest common divisor via the Euclidean algorithm, iterative and recursive."""


def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    # gcd(a, b) = gcd(b, a mod b)
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")

    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
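# Added usage sketch (not from the original file): both variants agree with math.gcd
# for non-negative inputs.
if __name__ == "__main__":
    import math

    assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == math.gcd(48, 18) == 6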
"""Bloom lazy import structure."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_bloom_fast'] = ['BloomTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_bloom'] = [
        'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BloomForCausalLM',
        'BloomModel',
        'BloomPreTrainedModel',
        'BloomForSequenceClassification',
        'BloomForTokenClassification',
        'BloomForQuestionAnswering',
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__magic_name__ = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
    def get_model_optimizer(self, resolution=3_2):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.00_01)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_0_0_0, beta_start=0.00_01, beta_end=0.02, beta_schedule="linear", clip_sample=True)
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0, beta_start=0.00_01, beta_end=0.02, beta_schedule="linear", clip_sample=True)
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 3_2, 3_2)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 3_2, 3_2)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_0_0_0, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=3_2)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=3_2)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # with identical seeds, DDPM and DDIM must produce the same noisy samples and predictions
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1E-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1E-5))
| 255
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
__lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-en-ro""": 1_0_2_4,
"""facebook/mbart-large-cc25""": 1_0_2_4,
}
# fmt: off
__lowerCAmelCase = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)))

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)))
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
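# Minimal usage sketch (downloads a checkpoint; model name taken from the map above):
#
#   tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")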
| 271
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Set the weight (and optionally bias) of one torch layer, shape-checked."""
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1))
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size))
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1))
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias))
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias))
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias))
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias))
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings))
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias))
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias))
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, 'rb') as f:
        model_weights = pickle.load(f)['weights']
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
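# Example invocation (file paths below are hypothetical):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path model.pkl \
#       --config_file config.json \
#       --pytorch_dump_path pytorch_model.bin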
| 271
| 1
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=2_4000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
'''simple docstring'''
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""")
        super().__init__(**kwargs)
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
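# Illustrative property derivation (values follow from the defaults above):
#
#   config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
#   config.chunk_length  # 24000 samples (1.0 s at 24 kHz)
#   config.chunk_stride  # max(1, int(0.99 * 24000)) == 23760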
| 369
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = 'lower newer'
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length')
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>')
        # Simple input
        s = 'This is a simple input'
        s2 = ['This is a simple input looooooooong', 'This is a simple input']
        p = ('This is a simple input', 'This is a pair')
        p2 = [
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding='max_length', max_length=30, return_tensors='np')
        out_sa = tokenizer(s2, padding=True, truncate=True, return_tensors='np')
        out_p = tokenizer(*p, padding='max_length', max_length=60, return_tensors='np')
        out_pa = tokenizer(p2, padding=True, truncate=True, return_tensors='np')
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
    def test_add_bos_token_slow(self):
        bos_token = '$$$'
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = 'This is a simple input'
        s2 = ['This is a simple input 1', 'This is a simple input 2']
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_sa = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_sa = tokenizer.batch_decode(out_sa.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
        text = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
        expected_truncated_text = '\nif len_a > len_b: result = a\nelse: result = b'
        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ['^#', re.escape('<|endoftext|>'), '^\'\'\'', '^"""', '\n\n\n']
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        pass
| 294
| 0
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    'name': hf_file.rfilename,
                    'size': None,
                    'type': 'file',
                }
                self.dir_cache.update(
                    {
                        str(d): {'name': str(d), 'size': None, 'type': 'directory'}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    })
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = "rb" , **__UpperCAmelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , _lowerCamelCase ):
raise NotImplementedError(F'Open is only implemented for dataset repositories, but got {self.repo_info}' )
__UpperCamelCase = hf_hub_url(self.repo_info.id , _lowerCamelCase , revision=self.repo_info.sha )
return fsspec.open(
_lowerCamelCase , mode=_lowerCamelCase , headers=get_authentication_headers_for_url(_lowerCamelCase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip('/'))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('/'))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f['name'] for f in out)
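# Illustrative use (the repo_info object is a hypothetical stand-in, e.g. from
# huggingface_hub.HfApi().dataset_info(...)):
#
#   fs = HfFileSystem(repo_info=repo_info)
#   fs.ls("")                  # top-level files and directories of the repo
#   fs.open("data/train.csv")  # stream one file via its hf_hub_url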
| 316
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = 'ClapFeatureExtractor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop('''sampling_rate''', None)
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)
        if text is not None and audios is not None:
            encoding['input_features'] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
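# Illustrative pairing of text and audio (the objects below are hypothetical stand-ins):
#
#   processor = ClapProcessor(feature_extractor, tokenizer)
#   batch = processor(text=["a dog barking"], audios=[waveform], sampling_rate=48_000, return_tensors="pt")
#   batch["input_ids"], batch["input_features"]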
| 94
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_instructblip'] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 367
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ = "cpu" , lowerCamelCase__ = None ):
"""simple docstring"""
lowercase__ : Any = torch.load(lowerCamelCase__ , map_location=lowerCamelCase__ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(lowerCamelCase__ , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
lowercase__ : int = v.half()
if save_path is None: # overwrite src_path
lowercase__ : Optional[Any] = src_path
torch.save(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
fire.Fire(convert)
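# Example invocation via python-fire (script and file names are hypothetical):
#
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin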
| 121
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 228
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 228
| 1
|
'''simple docstring'''
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main() -> None:
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')

    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)

    print(f'\n{mode.title()}ed message:')
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == 'encrypt':
                num += LETTERS.find(key[key_index])
            elif mode == 'decrypt':
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return ''.join(translated)
if __name__ == "__main__":
main()
| 187
|
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __snake_case( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset("""nielsr/rvlcdip-demo""")
        image = dataset["""train"""][0]["""image"""].convert("""RGB""")
        inputs = image_processor(image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7], device=torch_device, dtype=torch.float)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 187
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '''<s>''')
        self.assertEqual(vocab_keys[1], '''<pad>''')
        self.assertEqual(len(vocab_keys), 1_0_0_8)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_8)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]])
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''')

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
        original_tokenizer_encodings = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
'''input_ids''': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='''facebook/xglm-564M''', padding=False)
| 101
|
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    '''simple docstring'''
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 255
| 0
|
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(F"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = int(input("Enter the last number (nth term) of the P-Series"))
__A = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 350
|
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_DESCRIPTION = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_KWARGS_DESCRIPTION = "\nBLEURT score.\n\nArgs:\n    `predictions` (list of str): prediction/candidate sentences\n    `references` (list of str): reference sentences\n    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n    'scores': List of scores.\nExamples:\n\n    >>> predictions = [\"hello there\", \"general kenobi\"]\n    >>> references = [\"hello there\", \"general kenobi\"]\n    >>> bleurt = datasets.load_metric(\"bleurt\")\n    >>> results = bleurt.compute(predictions=predictions, references=references)\n    >>> print([round(v, 2) for v in results[\"scores\"]])\n    [1.03, 1.04]\n"
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Bleurt(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT checkpoint
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').")
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}")
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 108
| 0
|
"""simple docstring"""
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    '''simple docstring'''
    if len(nums) == 0:
        raise ValueError("""find_max() arg is an empty sequence""")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("""list index out of range""")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
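    # quick illustrative check of the divide-and-conquer recursion above
    assert find_max([2, 8, 3, 5], 0, 3) == 8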
| 136
|
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]]) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources per resource type."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still free: claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process' original index to its need vector."""
        return {self.__need().index(need): need for need in self.__need()}
    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("""_""" * 50 + """\n""")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        """Updated available resource stack for processes: """
                        + """ """.join([str(x) for x in available_resources]))
                    break
            if safe:
                print("""The process is in a safe state.\n""")
            else:
                print("""System in unsafe state. Aborting...\n""")
                break
    def __pretty_data(self):
        print(""" """ * 9 + """Allocated Resource Table""")
        for item in self.__allocated_resources_table:
            print(
                f"""P{self.__allocated_resources_table.index(item) + 1}"""
                + """ """.join(f"""{it:>8}""" for it in item)
                + """\n""")
        print(""" """ * 9 + """System Resource Table""")
        for item in self.__maximum_claim_table:
            print(
                f"""P{self.__maximum_claim_table.index(item) + 1}"""
                + """ """.join(f"""{it:>8}""" for it in item)
                + """\n""")
        print(
            """Current Usage by Active Processes: """
            + """ """.join(str(x) for x in self.__claim_vector))
        print(
            """Initial Available Resources: """
            + """ """.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
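    # illustrative run with the module-level test data above; describe=True
    # triggers the pretty-printing path in main()
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)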
| 294
| 0
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
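
# --- Added example (not part of the original module) ---
# A minimal filter satisfying the FilterType protocol above; `AllPassFilter`
# is an illustrative name, not something the original module defines.
class AllPassFilter:
    def process(self, sample: float) -> float:
        return sample  # pass every sample through unchanged


# show_frequency_response(AllPassFilter(), 48000)  # would plot a flat 0 dB line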
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
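
# --- Added example (not part of the original module) ---
# Instantiating the config above with its defaults; left commented out because
# the relative imports only resolve inside the transformers package:
# config = YolosConfig()
# assert config.model_type == "yolos" and config.num_detection_tokens == 100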
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
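
# --- Added example (not part of the original module) ---
# Typical translation usage of the tokenizer above; left commented out because
# it downloads the real sentencepiece model files:
# tokenizer = MBart50Tokenizer.from_pretrained(
#     "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO")
# model_inputs = tokenizer("Hello world", return_tensors="pt")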
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
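
# --- Added example (not part of the original tests) ---
# The YAML round trip the tests above rely on, in isolation:
# info = DatasetInfo(dataset_size=42)
# reloaded = DatasetInfo._from_yaml_dict(info._to_yaml_dict())  # keeps dataset_size=42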
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
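
# --- Added example (not part of the original tests) ---
# The fast tests above swap schedulers on a fixed ONNX checkpoint; the same
# pattern works outside tests (illustrative, needs onnxruntime and the model):
# pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#     "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider")
# pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)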
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
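
# --- Added example (not part of the original script) ---
# Command-line usage via fire (file names are illustrative):
#   python rouge_cli.py preds.txt targets.txt --save_path rouge.json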
from timeit import timeit
test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)

        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
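
# --- Added note (not part of the original tests) ---
# The memory trick used in test_all above: encode the prompt once, then delete
# the T5 text encoder before running the UNet stages, which keeps peak GPU
# usage under the asserted budgets.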
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
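
    # --- Added example (not part of the original file) ---
    # A small system checked by hand: x + y = 5 and x - y = 1 give x = 3, y = 2.
    print(solve_simultaneous([[1, 1, 5], [1, -1, 1]]))  # [3.0, 2.0]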
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
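
# --- Added example (not part of the original module) ---
# shift_tokens_right prepends the decoder start token and drops the last token:
# shift_tokens_right(jnp.array([[5, 6, 7]]), pad_token_id=0, decoder_start_token_id=2)
# -> [[2, 5, 6]]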
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
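
# --- Added note (not part of the original module) ---
# The four stages implied by the defaults above: patch sizes [7, 3, 3, 3] with
# strides [4, 2, 2, 2] downsample a 224px input by 4, 8, 16, and 32 overall.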
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")

        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
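
# --- Added example (not part of the original script) ---
# Typical invocation (the URL is the argparser default above; the dump folder
# name is illustrative):
#   python convert_dit_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base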
"""simple docstring"""
def lowercase (snake_case__ : Dict ) -> Tuple:
'''simple docstring'''
if not head:
return True
# split the list to two parts
lowerCAmelCase = head.next, head
while fast and fast.next:
lowerCAmelCase = fast.next.next
lowerCAmelCase = slow.next
lowerCAmelCase = slow.next
lowerCAmelCase = None # Don't forget here! But forget still works!
# reverse the second part
lowerCAmelCase = None
while second:
lowerCAmelCase = second.next
lowerCAmelCase = node
lowerCAmelCase = second
lowerCAmelCase = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
lowerCAmelCase = node.next
lowerCAmelCase = head.next
return True
def lowercase (snake_case__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
lowerCAmelCase = head
while fast and fast.next:
lowerCAmelCase = fast.next.next, slow.next
# 2. Push the second half into the stack
lowerCAmelCase = [slow.val]
while slow.next:
lowerCAmelCase = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
lowerCAmelCase = cur.next
return True
def lowercase (snake_case__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
if not head or not head.next:
return True
lowerCAmelCase = {}
lowerCAmelCase = 0
while head:
if head.val in d:
d[head.val].append(__snake_case )
else:
lowerCAmelCase = [pos]
lowerCAmelCase = head.next
pos += 1
lowerCAmelCase = pos - 1
lowerCAmelCase = 0
for v in d.values():
if len(__snake_case ) % 2 != 0:
middle += 1
else:
lowerCAmelCase = 0
for i in range(0 , len(__snake_case ) ):
if v[i] + v[len(__snake_case ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
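
# --- Added example (not part of the original file) ---
# A minimal node class to exercise the checks above; `ListNode` is an
# illustrative name, not something the original module defines.
class ListNode:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt


head = ListNode("r", ListNode("o", ListNode("t", ListNode("o", ListNode("r")))))
assert is_palindrome_stack(head)  # "rotor" reads the same both ways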
"""BLIP-2 model configuration."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
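# Usage sketch (not part of the original module): compose a Blip2Config from
# explicit sub-configs and round-trip it through `to_dict`. Assumes a working
# `transformers` install providing `OPTConfig`.
if __name__ == "__main__":
    from transformers import OPTConfig

    demo_config = Blip2Config.from_vision_qformer_text_configs(
        vision_config=Blip2VisionConfig(),
        qformer_config=Blip2QFormerConfig(),
        text_config=OPTConfig(),
    )
    assert demo_config.to_dict()["model_type"] == "blip-2"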
"""MaskFormer model configuration."""
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig


MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
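# Usage sketch (not part of the original module): with no arguments the config
# falls back to a Swin backbone and a DETR decoder, as the `__init__` above shows.
if __name__ == "__main__":
    demo_config = MaskFormerConfig()
    assert demo_config.backbone_config.model_type == "swin"
    assert demo_config.decoder_config.model_type == "detr"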
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.auto.tqdm` that can restrict output to the local main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Show the bar only on the local main process; disable it everywhere else.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
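# Usage sketch (an assumption, not from the original module): note that
# `main_process_only` is the *first* positional argument, so the iterable
# comes second.
#
#   for batch in tqdm(True, dataloader, desc="training"):
#       ...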
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Read a boolean flag from the environment, falling back to `default`."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Decorator that unconditionally skips a test."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=1 is set."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """Test case that provides a shared temporary directory, cleared between tests."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    """Gather `tensor` across processes and check that every copy is identical."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """Run `command` via subprocess.check_output; optionally return decoded stdout."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
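# Usage sketch (the checkpoint name is an assumption, not from this file): the
# processor bundles the image processor and tokenizer behind one `__call__`.
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt")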
"""simple docstring"""
import math
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
if (
not isinstance(snake_case__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
if (
not isinstance(snake_case__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
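# A short sanity check (values are an assumption, not from the original file):
# for 100 VA of apparent power at power factor 0.9, real power is 90 W and
# reactive power is roughly 43.59 VAR.
if __name__ == "__main__":
    assert abs(real_power(100, 0.9) - 90.0) < 1e-9
    assert 43.5 < reactive_power(100, 0.9) < 43.7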
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
A_ : Dict = HfApi()
A_ : List[str] = {}
# fmt: off
A_ : Dict = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
A_ : List[Any] = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
A_ : str = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
A_ : List[Any] = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
A_ : Tuple = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
A_ : List[str] = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
A_ : List[Any] = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
A_ : Dict = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
A_ : Tuple = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
A_ : str = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
A_ : str = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
A_ : int = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
A_ : int = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
A_ : str = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
A_ : Optional[int] = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , **SCREAMING_SNAKE_CASE_ : List[str] ):
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = eval_examples
lowerCAmelCase_ : List[str] = post_process_function
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Dataset] = None , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE_ : str = "eval" , **SCREAMING_SNAKE_CASE_ : Tuple , ):
lowerCAmelCase_ : Tuple = gen_kwargs.copy()
lowerCAmelCase_ : Union[str, Any] = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
)
lowerCAmelCase_ : str = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
)
lowerCAmelCase_ : Optional[int] = gen_kwargs
lowerCAmelCase_ : int = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCAmelCase_ : Optional[int] = self.get_eval_dataloader(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase_ : int = self.compute_metrics
lowerCAmelCase_ : List[Any] = None
lowerCAmelCase_ : List[str] = time.time()
lowerCAmelCase_ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase_ : Dict = eval_loop(
SCREAMING_SNAKE_CASE_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=SCREAMING_SNAKE_CASE_ , metric_key_prefix=SCREAMING_SNAKE_CASE_ , )
finally:
lowerCAmelCase_ : str = compute_metrics
lowerCAmelCase_ : Dict = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCAmelCase_ : List[str] = self.post_process_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = self.compute_metrics(SCREAMING_SNAKE_CASE_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
lowerCAmelCase_ : Union[str, Any] = metrics.pop(SCREAMING_SNAKE_CASE_ )
metrics.update(output.metrics )
else:
lowerCAmelCase_ : Any = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(SCREAMING_SNAKE_CASE_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCAmelCase_ : str = self.callback_handler.on_evaluate(self.args , self.state , self.control , SCREAMING_SNAKE_CASE_ )
return metrics
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : str = "test" , **SCREAMING_SNAKE_CASE_ : List[str] ):
lowerCAmelCase_ : Union[str, Any] = gen_kwargs.copy()
lowerCAmelCase_ : int = self.get_test_dataloader(SCREAMING_SNAKE_CASE_ )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase_ : List[Any] = self.compute_metrics
lowerCAmelCase_ : Any = None
lowerCAmelCase_ : Any = time.time()
lowerCAmelCase_ : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase_ : Optional[Any] = eval_loop(
SCREAMING_SNAKE_CASE_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=SCREAMING_SNAKE_CASE_ , metric_key_prefix=SCREAMING_SNAKE_CASE_ , )
finally:
lowerCAmelCase_ : Dict = compute_metrics
lowerCAmelCase_ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCAmelCase_ : int = self.post_process_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'predict' )
lowerCAmelCase_ : Any = self.compute_metrics(SCREAMING_SNAKE_CASE_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
lowerCAmelCase_ : Optional[int] = metrics.pop(SCREAMING_SNAKE_CASE_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=SCREAMING_SNAKE_CASE_ )
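# Usage sketch (an assumption, not from the original file): generation arguments
# are forwarded through `gen_kwargs` on both entry points.
#
#   metrics = trainer.evaluate(max_length=64, num_beams=4, metric_key_prefix="eval")
#   predictions = trainer.predict(predict_dataset, predict_examples, num_beams=4)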
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
lowercase__ : List[Any] = logging.getLogger(__name__)
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """masked_bert"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : int=7_6_8 , SCREAMING_SNAKE_CASE_ : Tuple=1_2 , SCREAMING_SNAKE_CASE_ : List[Any]=1_2 , SCREAMING_SNAKE_CASE_ : Tuple=3_0_7_2 , SCREAMING_SNAKE_CASE_ : List[str]="gelu" , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Any=1E-12 , SCREAMING_SNAKE_CASE_ : List[str]=0 , SCREAMING_SNAKE_CASE_ : Optional[int]="topK" , SCREAMING_SNAKE_CASE_ : Optional[int]="constant" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , **SCREAMING_SNAKE_CASE_ : Any , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = vocab_size
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Optional[int] = num_hidden_layers
lowerCAmelCase_ : Dict = num_attention_heads
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : List[Any] = intermediate_size
lowerCAmelCase_ : Any = hidden_dropout_prob
lowerCAmelCase_ : str = attention_probs_dropout_prob
lowerCAmelCase_ : Any = max_position_embeddings
lowerCAmelCase_ : Dict = type_vocab_size
lowerCAmelCase_ : Tuple = initializer_range
lowerCAmelCase_ : List[Any] = layer_norm_eps
lowerCAmelCase_ : str = pruning_method
lowerCAmelCase_ : Optional[Any] = mask_init
lowerCAmelCase_ : int = mask_scale
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
# Initialise PyTorch model
SCREAMING_SNAKE_CASE__ : Tuple = RemBertConfig.from_json_file(__UpperCamelCase )
print("""Building PyTorch model from configuration: {}""".format(str(__UpperCamelCase ) ) )
SCREAMING_SNAKE_CASE__ : Dict = RemBertModel(__UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(__UpperCamelCase ) )
torch.save(model.state_dict() , __UpperCamelCase )
if __name__ == "__main__":
a :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a :str = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
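# Example invocation (paths are illustrative assumptions, not from the original
# file):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert-pytorch/pytorch_model.bin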
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the scheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample, timestep=None):
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noisy_samples = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noisy_samples + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
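# Predictor-corrector sampling sketch (an assumption, not part of this file):
# each inference step runs `correct_steps` corrector updates followed by one
# predictor update.
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           model_output = model(sample, t).sample
#           sample = scheduler.step_correct(model_output, sample).prev_sample
#       model_output = model(sample, t).sample
#       sample = scheduler.step_pred(model_output, t, sample).prev_sample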
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def merge_sort(collection):
    """Sort by repeatedly pulling the minimum to the front and the maximum to the back."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
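# Example invocation (the script name and output paths are illustrative
# assumptions, not from the original file):
#
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set nq-dev.questions \
#       --gold_data_path nq-dev.gold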
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = params
_lowerCAmelCase : Dict = np.array(snake_case__ )
_lowerCAmelCase : Optional[Any] = np.array([len(snake_case__ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , snake_case__ ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def a ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.params.max_model_input_size
_lowerCAmelCase : int = self.lengths > max_len
logger.info(F'Splitting {sum(snake_case__ )} too long sequences.' )
def divide_chunks(snake_case__ , snake_case__ ):
return [l[i : i + n] for i in range(0 , len(snake_case__ ) , snake_case__ )]
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : str = []
if self.params.mlm:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
_lowerCAmelCase , _lowerCAmelCase : str = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
_lowerCAmelCase : str = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
_lowerCAmelCase : Optional[Any] = np.insert(snake_case__ , 0 , snake_case__ )
if sub_s[-1] != sep_id:
_lowerCAmelCase : str = np.insert(snake_case__ , len(snake_case__ ) , snake_case__ )
assert len(snake_case__ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(snake_case__ )
new_tok_ids.extend(snake_case__ )
new_lengths.extend([len(snake_case__ ) for l in sub_seqs] )
_lowerCAmelCase : int = np.array(snake_case__ )
_lowerCAmelCase : Any = np.array(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = len(self )
_lowerCAmelCase : List[str] = self.lengths > 11
_lowerCAmelCase : Optional[Any] = self.token_ids[indices]
_lowerCAmelCase : List[Any] = self.lengths[indices]
_lowerCAmelCase : Optional[int] = len(self )
logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
def a ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
_lowerCAmelCase : Any = self.params.special_tok_ids['unk_token']
_lowerCAmelCase : Tuple = len(self )
_lowerCAmelCase : Any = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
_lowerCAmelCase : Tuple = (unk_occs / self.lengths) < 0.5
_lowerCAmelCase : Union[str, Any] = self.token_ids[indices]
_lowerCAmelCase : Tuple = self.lengths[indices]
_lowerCAmelCase : Union[str, Any] = len(self )
logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
def a ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def batch_sequences( self , batch ):
'''simple docstring'''
token_ids = [t[0] for t in batch]
lengths = [t[1] for t in batch]
assert len(token_ids ) == len(lengths )
# Max for paddings
max_seq_len_ = max(lengths )
# Pad token ids
if self.params.mlm:
pad_idx = self.params.special_tok_ids['pad_token']
else:
pad_idx = self.params.special_tok_ids['unk_token']
tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
assert len(tk_ ) == len(token_ids )
assert all(len(t ) == max_seq_len_ for t in tk_ )
tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
lg_t = torch.tensor(lengths )  # (bs)
return tk_t, lg_t
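# Usage sketch (illustrative, not part of the original file): batch_sequences is
# meant to be passed to a DataLoader as its collate function, e.g.
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)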
| 25
|
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def test_custom_files_are_present(transformers_path ):
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
args = parser.parse_args()
if args.check_lib:
transformers_module = importlib.import_module("""transformers""")
transformers_path = Path(transformers_module.__file__).parent
else:
transformers_path = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 25
| 1
|
def equated_monthly_installments(principal: float , rate_per_annum: float , years_to_repay: int ) -> float:
"""simple docstring"""
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
rate_per_month = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
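# Worked example (hypothetical figures, not from the original file): borrowing
# 25_000 at 8% p.a. over 10 years uses a monthly rate of 0.08 / 12 over 120
# payments, giving a monthly installment of roughly 303.32:
#   equated_monthly_installments(25_000, 0.08, 10)  # -> ~303.32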
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(datasets: List[DatasetType] , probabilities: Optional[List[float]] = None , seed: Optional[int] = None , info: Optional[DatasetInfo] = None , split: Optional[NamedSplit] = None , stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(datasets ):
if not isinstance(dataset , (Dataset, IterableDataset) ):
if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'''is an empty dataset dictionary.''' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
if i == 0:
dataset_type, other_type = (
(Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
)
elif not isinstance(dataset , dataset_type ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
else:
return _interleave_iterable_datasets(
datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def concatenate_datasets(dsets: List[DatasetType] , info: Optional[DatasetInfo] = None , split: Optional[NamedSplit] = None , axis: int = 0 , ) -> DatasetType:
"""simple docstring"""
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(dsets ):
if not isinstance(dataset , (Dataset, IterableDataset) ):
if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'''is an empty dataset dictionary.''' )
raise ValueError(
F"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
raise ValueError(
F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
if i == 0:
dataset_type, other_type = (
(Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
)
elif not isinstance(dataset , dataset_type ):
raise ValueError(
F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
else:
return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
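# Usage sketch (illustrative only, not part of the original file):
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   interleave_datasets([d1, d2])["a"]    # -> [0, 10, 1, 11, 2, 12]
#   concatenate_datasets([d1, d2])["a"]   # -> [0, 1, 2, 10, 11, 12]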
| 51
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor ( CLIPImageProcessor ):
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None:
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." ,_SCREAMING_SNAKE_CASE ,)
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
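# Note: this class is only a thin deprecation shim; new code should use the
# image processor directly, e.g.
#   CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")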
| 355
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ) -> float:
"""simple docstring"""
xa = x_start
fxa = fnc(x_start )
area = 0.0
for _ in range(steps ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
xb = (x_end - x_start) / steps + xa
fxb = fnc(xb )
area += abs(fxb + fxa ) * (xb - xa ) / 2
# Increment step
xa = xb
fxa = fxb
return area
if __name__ == "__main__":
def f(x ):
"""simple docstring"""
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
i = 10
while i <= 100000:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 10
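# Sanity check (added for clarity): because each segment uses abs(...), the loop
# approximates the area between |x^3 + x^2| and the x axis; over [-5, 5] the
# exact value is 938/3 ~= 312.67 (f changes sign at x = -1), which the printed
# values converge to as the step count grows.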
| 142
| 0
|
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
"""simple docstring"""
parser = argparse.ArgumentParser()
parser.add_argument(
"""-m""" ,"""--pretrained_model_name_or_path""" ,type=str ,default=None ,required=True ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,)
parser.add_argument(
"""-c""" ,"""--caption""" ,type=str ,default="""robotic cat with wings""" ,help="""Text used to generate images.""" ,)
parser.add_argument(
"""-n""" ,"""--images_num""" ,type=int ,default=4 ,help="""How much images to generate.""" ,)
parser.add_argument(
"""-s""" ,"""--seed""" ,type=int ,default=42 ,help="""Seed for random process.""" ,)
parser.add_argument(
"""-ci""" ,"""--cuda_id""" ,type=int ,default=0 ,help="""cuda_id.""" ,)
args = parser.parse_args()
return args
def image_grid(imgs ,rows ,cols ):
"""simple docstring"""
if not len(imgs ) == rows * cols:
raise ValueError("""The specified number of rows and columns are not correct.""" )
w, h = imgs[0].size
grid = Image.new("""RGB""" ,size=(cols * w, rows * h) )
grid_w, grid_h = grid.size
for i, img in enumerate(imgs ):
grid.paste(img ,box=(i % cols * w, i // cols * h) )
return grid
def __UpperCAmelCase ( lowercase ,lowercase="robotic cat with wings" ,lowercase=7.5 ,lowercase=50 ,lowercase=1 ,lowercase=42 ,):
"""simple docstring"""
_UpperCAmelCase = torch.Generator(pipeline.device ).manual_seed(lowercase )
_UpperCAmelCase = pipeline(
lowercase ,guidance_scale=lowercase ,num_inference_steps=lowercase ,generator=lowercase ,num_images_per_prompt=lowercase ,).images
_UpperCAmelCase = int(math.sqrt(lowercase ) )
_UpperCAmelCase = image_grid(lowercase ,rows=_rows ,cols=num_images_per_prompt // _rows )
return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")):
unet = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, """unet""", unet)
else:
unet = unet.to(torch.device("""cuda""", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
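# Note (added for clarity): `load` from neural_compressor restores a quantized
# UNet from best_model.pt when one exists next to the pipeline weights;
# otherwise the original FP32 UNet is simply moved to the requested CUDA device.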
| 289
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester :
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_hidden_groups = num_hidden_groups
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = AlbertModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
result = model(input_ids , token_type_ids=token_type_ids )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = AlbertForPreTraining(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = AlbertForMaskedLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = AlbertForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = AlbertForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = AlbertForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = AlbertForMultipleChoice(config=config )
model.to(torch_device )
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
result = model(
multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
inputs_dict["sentence_order_label"] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def setUp( self ):
self.model_tester = AlbertModelTester(self )
self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )
def test_config( self ):
self.config_tester.run_common_tests()
def test_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_pretraining( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
def test_for_masked_lm( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def test_for_multiple_choice( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
def test_for_question_answering( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
def test_for_sequence_classification( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def test_model_various_embeddings( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = AlbertModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest( unittest.TestCase ):
@slow
def test_inference_no_head_absolute_embedding( self ):
model = AlbertModel.from_pretrained("""albert-base-v2""" )
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
output = model(input_ids , attention_mask=attention_mask )[0]
expected_shape = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 289
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_mmbt'''] = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 211
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='''vision-to-text modeling''')
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
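# Usage sketch (checkpoint name is illustrative, not part of the original file):
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")
# resolves the checkpoint's config class, then dispatches to FlaxBertModel
# through FLAX_MODEL_MAPPING.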
| 211
| 1
|
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest( SchedulerCommonTest ):
scheduler_classes = (DDIMParallelScheduler,)
forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 50))
def get_scheduler_config( self ,**__UpperCamelCase ) -> int:
'''simple docstring'''
config = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**__UpperCamelCase )
return config
def full_loop( self ,**__UpperCamelCase ) -> int:
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**__UpperCamelCase )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps, eta = 10, 0.0
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps )
for t in scheduler.timesteps:
residual = model(sample ,t )
sample = scheduler.step(residual ,t ,sample ,eta ).prev_sample
return sample
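# full_loop runs a 10-step deterministic DDIM sampling pass and returns the
# final sample; the regression tests further below compare the sum/mean of
# its output against reference values recorded for this scheduler.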
def test_timesteps( self ) -> Tuple:
'''simple docstring'''
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=timesteps )
def test_steps_offset( self ) -> Any:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=steps_offset )
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(steps_offset=1 )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
def test_betas( self ) -> Optional[int]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=beta_start ,beta_end=beta_end )
def test_schedules( self ) -> Any:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule )
def test_prediction_type( self ) -> Dict:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def test_clip_sample( self ) -> Tuple:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample )
def test_timestep_spacing( self ) -> int:
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=timestep_spacing )
def test_rescale_betas_zero_snr( self ) -> Dict:
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
def test_thresholding( self ) -> int:
'''simple docstring'''
self.check_over_configs(thresholding=False )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=True ,prediction_type=prediction_type ,sample_max_value=threshold ,)
def test_time_indices( self ) -> int:
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=t )
def test_inference_steps( self ) -> Union[str, Any]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
self.check_over_forward(time_step=t ,num_inference_steps=num_inference_steps )
def test_eta( self ) -> Optional[int]:
'''simple docstring'''
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=t ,eta=eta )
def test_variance( self ) -> Optional[Any]:
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.14771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.32460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
def test_batch_step_no_noise( self ) -> List[str]:
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
num_inference_steps, eta = 10, 0.0
scheduler.set_timesteps(num_inference_steps )
model = self.dummy_model()
sample1 = self.dummy_sample_deter
sample2 = self.dummy_sample_deter + 0.1
sample3 = self.dummy_sample_deter - 0.1
per_sample_batch = sample1.shape[0]
samples = torch.stack([sample1, sample2, sample3] ,dim=0 )
timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 ,per_sample_batch )
residual = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
pred_prev_sample = scheduler.batch_step_no_noise(residual ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,eta )
result_sum = torch.sum(torch.abs(pred_prev_sample ) )
result_mean = torch.mean(torch.abs(pred_prev_sample ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def test_full_loop_no_noise( self ) -> Optional[int]:
'''simple docstring'''
sample = self.full_loop()
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.223967 ) < 1e-3
def test_full_loop_with_v_prediction( self ) -> Tuple:
'''simple docstring'''
sample = self.full_loop(prediction_type='v_prediction' )
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
def test_full_loop_with_set_alpha_to_one( self ) -> List[Any]:
'''simple docstring'''
sample = self.full_loop(set_alpha_to_one=True ,beta_start=0.01 )
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
def test_full_loop_with_no_set_alpha_to_one( self ) -> Optional[Any]:
'''simple docstring'''
sample = self.full_loop(set_alpha_to_one=False ,beta_start=0.01 )
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
| 213
|
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256 :
def __init__( self ,data ) -> None:
'''simple docstring'''
self.data = data
# Initialize hash values
self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
self.round_constants = [
0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
]
self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def preprocessing( data ) -> bytes:
'''simple docstring'''
padding = b'\x80' + (b'\x00' * (63 - (len(data ) + 8) % 64))
big_endian_integer = struct.pack('>Q' ,(len(data ) * 8) )
return data + padding + big_endian_integer
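# Padding note (comment added for clarity): a single 0x80 byte marks the end of
# the message, zeros fill up to 56 mod 64, and the final 8 bytes hold the
# big-endian bit length - e.g. an empty message pads to exactly one 64-byte block.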
def final_hash( self ) -> None:
'''simple docstring'''
self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
words = list(struct.unpack('>16L' ,block ) )
# add 48 0-ed integers
words += [0] * 48
a, b, c, d, e, f, g, h = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
s0 = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
s1 = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
words[index] = (
words[index - 16] + s0 + words[index - 7] + s1
) % 0x100000000
# Compression
s1 = self.ror(e ,6 ) ^ self.ror(e ,11 ) ^ self.ror(e ,25 )
ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
temp1 = (
h + s1 + ch + self.round_constants[index] + words[index]
) % 0x100000000
s0 = self.ror(a ,2 ) ^ self.ror(a ,13 ) ^ self.ror(a ,22 )
maj = (a & b) ^ (a & c) ^ (b & c)
temp2 = (s0 + maj) % 0x100000000
h, g, f, e, d, c, b, a = (
g,
f,
e,
((d + temp1) % 0x100000000),
c,
b,
a,
((temp1 + temp2) % 0x100000000),
)
mutated_hash_values = [a, b, c, d, e, f, g, h]
# Modify final values
self.hashes = [
((element + mutated_hash_values[index]) % 0x100000000)
for index, element in enumerate(self.hashes )
]
self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
def ror( self ,value ,rotations ) -> int:
'''simple docstring'''
return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
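# Example (illustrative): ror(0x00000001, 1) -> 0x80000000, i.e. a 32-bit
# right-rotation that wraps the low bit around to the top bit.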
class SHA256HashTest( unittest.TestCase ):
def test_match_hashes( self ) -> None:
'''simple docstring'''
import hashlib
msg = bytes('Test String' ,'utf-8' )
self.assertEqual(SHA256(msg ).hash ,hashlib.sha256(msg ).hexdigest() )
def main():
import doctest
doctest.testmod()
parser = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
args = parser.parse_args()
input_string = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
hash_input = f.read()
else:
hash_input = bytes(input_string , 'utf-8' )
print(SHA256(hash_input ).hash )
if __name__ == "__main__":
main()
| 213
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
T5Config,
T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
'''simple docstring'''
url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
return image
def create_rename_keys(config):
'''simple docstring'''
rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
for i in range(config.vision_config.num_hidden_layers):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",))
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight"""))
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias"""))
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))
# fmt: on
return rename_keys
def rename_key(dct, old, new):
'''simple docstring'''
val = dct.pop(old)
dct[new] = val
def read_in_q_v_bias(state_dict, config):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers):
# read in original q and v biases
q_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""")
v_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""")
# next, set bias in the state dict
qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
state_dict[F"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blipa_config(model_name):
'''simple docstring'''
image_size = 364 if "coco" in model_name else 224
vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
elif "t5-xxl" in model_name:
text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
elif "vicuna-7b" in model_name:
text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
elif "vicuna-13b" in model_name:
text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
else:
raise ValueError("Model name not supported")
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)
return config, image_size
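# Note (comment added for clarity): vocab_size=32001 for the Vicuna variants is
# the 32000 Llama tokens plus the extra [PAD] token added to the tokenizer
# during conversion.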
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
'''simple docstring'''
qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})
if "t5" in model_name:
tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
tokenizer = LlamaTokenizerFast.from_pretrained(
"huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>")
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
config, image_size = get_blipa_config(model_name)
hf_model = InstructBlipForConditionalGeneration(config).eval()
model_name_to_original = {
"instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
"instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
"instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
"instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
}
name, model_type = model_name_to_original[model_name]
# load original model
print("Loading original model...")
hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
original_model, vis_processors, _ = load_model_and_preprocess(
name=name, model_type=model_type, is_eval=True, device=lavis_device)
original_model.eval()
print("Done!")
# update state dict keys
__lowercase = original_model.state_dict()
__lowercase = create_rename_keys(UpperCamelCase_)
for src, dest in rename_keys:
rename_key(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_)
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__lowercase = state_dict.pop(UpperCamelCase_)
if key.startswith("Qformer.bert"):
__lowercase = key.replace("Qformer.bert", "qformer")
if "attention.self" in key:
__lowercase = key.replace("self", "attention")
if "llm_proj" in key:
__lowercase = key.replace("llm_proj", "language_projection")
if "t5_proj" in key:
__lowercase = key.replace("t5_proj", "language_projection")
if key.startswith("llm_model"):
__lowercase = key.replace("llm_model", "language_model")
if key.startswith("t5"):
__lowercase = key.replace("t5", "language")
__lowercase = val
# read in qv biases
read_in_q_v_bias(UpperCamelCase_, UpperCamelCase_)
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(UpperCamelCase_, strict=UpperCamelCase_)
__lowercase = load_demo_image()
__lowercase = "What is unusual about this image?"
# create processor
__lowercase = BlipImageProcessor(
size={"height": image_size, "width": image_size}, image_mean=UpperCamelCase_, image_std=UpperCamelCase_)
__lowercase = InstructBlipProcessor(
image_processor=UpperCamelCase_, tokenizer=UpperCamelCase_, qformer_tokenizer=UpperCamelCase_, )
__lowercase = processor(images=UpperCamelCase_, text=UpperCamelCase_, return_tensors="pt").to(UpperCamelCase_)
# make sure processor creates exact same pixel values
__lowercase = vis_processors["eval"](UpperCamelCase_).unsqueeze(0).to(UpperCamelCase_)
__lowercase = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device), UpperCamelCase_)
original_model.to(UpperCamelCase_)
hf_model.to(UpperCamelCase_)
with torch.no_grad():
if "vicuna" in model_name:
__lowercase = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
__lowercase = hf_model(**UpperCamelCase_).logits
else:
__lowercase = original_model(
{"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}).logits
__lowercase = tokenizer("\n", return_tensors="pt").input_ids.to(UpperCamelCase_)
__lowercase = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
__lowercase = hf_model(**UpperCamelCase_, labels=UpperCamelCase_).logits
print("First values of original logits:", original_logits[0, :3, :3])
print("First values of HF logits:", logits[0, :3, :3])
# assert values
assert original_logits.shape == logits.shape
__lowercase = 1E-4 if "vicuna" in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device), UpperCamelCase_, atol=UpperCamelCase_)
print("Looks ok!")
print("Generating with original model...")
__lowercase = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)
# important: we need to cast the weights of the HF model to the appropriate type
print("Generating with HF model...")
__lowercase = hf_model.generate(
**UpperCamelCase_, do_sample=UpperCamelCase_, num_beams=5, max_length=256, min_length=1, top_p=0.9, repetition_penalty=1.5, length_penalty=1.0, temperature=1, )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
__lowercase = 2
print("Original generation:", UpperCamelCase_)
__lowercase = processor.batch_decode(UpperCamelCase_, skip_special_tokens=UpperCamelCase_)
__lowercase = [text.strip() for text in output_text]
print("HF generation:", UpperCamelCase_)
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCamelCase_)
hf_model.save_pretrained(UpperCamelCase_)
if push_to_hub:
processor.push_to_hub(F"""Salesforce/{model_name}""")
hf_model.push_to_hub(F"""Salesforce/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
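# A minimal usage sketch for the converted checkpoint (the local path is hypothetical,
# assuming the script above was run with --pytorch_dump_folder_path ./instructblip-flan-t5-xl):
#
#   from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration
#
#   processor = InstructBlipProcessor.from_pretrained("./instructblip-flan-t5-xl")
#   model = InstructBlipForConditionalGeneration.from_pretrained("./instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
#   generated_ids = model.generate(**inputs, num_beams=5, max_length=256)
#   print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())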
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint which are supposed to download a comet model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Recursively print all subsequences: at each index, first branch on excluding the
    current element, then on including it."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
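# Worked example: for the sequence [1, 2], the recursion above prints the subsequences in
# the order [], [2], [1], [1, 2] — the exclude-branch is always explored before the
# include-branch at each index.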
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)

    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )

        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , )
    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with tax applied, e.g. price_plus_tax(100, 0.25) == 125.0."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(1_00, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''')
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
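# Example invocation (hypothetical local paths; the checkpoint, metadata, and entity
# vocabulary come from the original LUKE release):
#   python convert_luke_checkpoint.py \
#       --checkpoint_path luke_base/pytorch_model.bin \
#       --metadata_path luke_base/metadata.json \
#       --entity_vocab_path luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-base \
#       --model_size base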
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(self, speech_model: WhisperForConditionalGeneration, speech_processor: WhisperProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio with Whisper and use the transcription as the prompt
        input_features = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(input_features, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        # scale and decode the image latents with the vae
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
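# A minimal usage sketch, assuming this file is loaded as a diffusers community pipeline
# (the custom_pipeline name, model ids, and audio loading are illustrative, and the Whisper
# components must be supplied explicitly):
#
#   import torch
#   from datasets import load_dataset
#   from diffusers import DiffusionPipeline
#   from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#   sample = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")[3]
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="speech_to_image_diffusion",
#       speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#       speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#   )
#   image = pipe(sample["audio"]["array"], sampling_rate=sample["audio"]["sampling_rate"]).images[0]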
"""
Given a matrix of numbers in which all rows and all columns are sorted in decreasing
order, return the number of negative numbers.
https://leetcode.com/problems/count-negative-numbers-in-a-sorted-matrix/
"""
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1_000 - i, -1_000 - i, -1)) for i in range(1_000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    # Validate that rows and columns of the grid are sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
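# Worked example: find_negative_index([4, 3, 2, -1]) == 3, the index of the first negative
# value, while find_negative_index([1, 0]) == 2 (no negatives, so the length is returned).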
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
"""
https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform

The Burrows-Wheeler transform (BWT) rearranges a character string into runs of
similar characters, which is useful for compression.
"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]
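# Worked example: all_rotations("abc") returns ["abc", "bca", "cab"] — every cyclic shift
# of the input string, in order of the starting index.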
def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
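# Worked example: bwt_transform("banana") sorts the rotations
# ["abanan", "anaban", "ananab", "banana", "nabana", "nanaba"], takes the last column,
# and returns {"bwt_string": "nnbaaa", "idx_original_string": 3};
# reverse_bwt("nnbaaa", 3) then rebuilds "banana".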
if __name__ == "__main__":
lowercase_ = "Provide a string that I will generate its BWT transform: "
lowercase_ = input(entry_msg).strip()
lowercase_ = bwt_transform(s)
print(
F"""Burrows Wheeler transform for string '{s}' results """
F"""in '{result['bwt_string']}'"""
)
lowercase_ = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
F"""we get original string '{original_string}'"""
)
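
# A minimal usage sketch. "^BANANA" is a standard worked example for the BWT:
# its sorted rotations end in B, N, N, ^, A, A, A, and the original string sits
# at sorted index 6. Call _example() manually to verify the round trip.
def _example() -> None:
    result = bwt_transform("^BANANA")
    assert result == {"bwt_string": "BNN^AAA", "idx_original_string": 6}
    assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "^BANANA"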
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """If the two strings differ in exactly one position, return the merged
    string with '_' at that position; otherwise return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants until no further merging is possible."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to its fixed-width binary string representation."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(prime_implicants)
    # First pick implicants that are the only cover for some minterm.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Then greedily pick the implicant covering the most remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the chart mapping prime implicants to the minterms they cover."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
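
# A minimal usage sketch for the merging helper: minterms that differ in
# exactly one bit merge with a '_' at the differing position, anything else
# returns False. Call _example() manually to check.
def _example() -> None:
    assert compare_string("0010", "0110") == "0_10"
    assert compare_string("0110", "1101") is False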
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the attribute path to the parameter being set.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
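
# A usage sketch; the paths below are illustrative placeholders, not real
# files. The flags mirror the argparse definitions above:
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --dict_path /path/to/dict.ltr.txt
#
# Pass --not_finetuned to convert a pretraining-only checkpoint rather than a
# fine-tuned CTC model.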
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return set of symbol pairs in a word. Word is represented as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
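
# A minimal sketch of the BPE helper above (an illustrative call, independent
# of any vocabulary files): get_pairs enumerates adjacent symbol pairs in a
# word tuple. Call _example() manually to check.
def _example() -> None:
    assert get_pairs(("h", "e", "l", "l", "o")) == {
        ("h", "e"),
        ("e", "l"),
        ("l", "l"),
        ("l", "o"),
    }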
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
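
# A minimal usage sketch, assuming the `transformers` configuration machinery
# is available at runtime: default construction plus one override. Call
# _example() manually to check.
def _example() -> None:
    config = RoCBertConfig(hidden_size=128)
    assert config.model_type == "roc_bert"
    assert config.hidden_size == 128
    assert config.enable_pronunciation is True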
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
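
# A usage sketch, assuming network access and a CLIP-style checkpoint; the
# model name and image URL below are illustrative, not pinned by this file:
#
#   from transformers import pipeline
#
#   classifier = pipeline(
#       "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
#   )
#   outputs = classifier(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["a photo of a cat", "a photo of a dog"],
#   )
#   # outputs: a list of {"score": ..., "label": ...} dicts sorted by score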
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by inspecting the lowest bit after each right shift."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark the two counting functions against each other with timeit."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
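
# A minimal usage sketch: 25 is 0b11001, so both counters must report 3 set
# bits, matching Python's own bin(...).count("1"). Call _example() manually.
def _example() -> None:
    assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
    assert get_set_bits_count_using_modulo_operator(25) == 3
    assert bin(25).count("1") == 3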
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
_snake_case = "▁"
class lowercase ( lowerCAmelCase_ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["input_ids", "attention_mask"]
_a = BarthezTokenizer
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , **_a , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
_A : Dict = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
_snake_case , tokenizer_file=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , **_snake_case , )
_A : List[Any] = vocab_file
_A : Optional[Any] = False if not self.vocab_file else True
def a__ ( self , _a , _a = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A : Dict = [self.cls_token_id]
_A : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ ( self , _a , _a = None ) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self , _a , _a = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A : Any = os.path.join(
_snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
a_ :List[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def lowercase_ (A : Dict ):
snake_case__ : Optional[Any] = {}
with open(A , 'r' ) as file:
for line_number, line in enumerate(A ):
snake_case__ : Dict = line.strip()
if line:
snake_case__ : int = line.split()
snake_case__ : List[str] = line_number
snake_case__ : Dict = words[0]
snake_case__ : Optional[Any] = value
return result
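# Hedged sketch (toy lines, assumed) of the parsing above: each non-empty line
# maps its line number to the line's first word, yielding an id-to-label dict.
_toy_lines = ["happy 0.9", "sad 0.1", ""]
_toy_result = {}
for _i, _line in enumerate(_toy_lines):
    _line = _line.strip()
    if _line:
        _toy_result[_i] = _line.split()[0]
assert _toy_result == {0: "happy", 1: "sad"}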
def lowercase_ (A : int , A : int , A : Optional[int] , A : Optional[Any] , A : Tuple ):
for attribute in key.split('.' ):
snake_case__ : Optional[int] = getattr(A , A )
snake_case__ : Union[str, Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(A ):
snake_case__ : List[str] = PARAM_MAPPING[full_name.split('.' )[-1]]
snake_case__ : Dict = 'param'
if weight_type is not None and weight_type != "param":
snake_case__ : Union[str, Any] = getattr(A , A ).shape
elif weight_type is not None and weight_type == "param":
snake_case__ : Optional[int] = hf_pointer
for attribute in hf_param_name.split('.' ):
snake_case__ : Optional[Any] = getattr(A , A )
snake_case__ : Dict = shape_pointer.shape
# let's reduce dimension
snake_case__ : List[Any] = value[0]
else:
snake_case__ : Union[str, Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : Any = value
elif weight_type == "weight_g":
snake_case__ : List[Any] = value
elif weight_type == "weight_v":
snake_case__ : Any = value
elif weight_type == "bias":
snake_case__ : List[Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
snake_case__ : int = getattr(A , A )
snake_case__ : Optional[int] = value
else:
snake_case__ : Optional[Any] = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def lowercase_ (A : Tuple , A : List[Any] , A : int , A : str , A : Tuple ):
snake_case__ : Optional[int] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(A ):
snake_case__ : List[str] = PARAM_MAPPING[full_name.split('.' )[-1]]
snake_case__ : str = 'param'
if weight_type is not None and weight_type != "param":
snake_case__ : int = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
snake_case__ : Any = '.'.join([key, hf_param_name] )
else:
snake_case__ : Dict = key
snake_case__ : List[str] = value if 'lm_head' in full_key else value[0]
a_ :List[str] = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def lowercase_ (A : str , A : Optional[Any] , A : Optional[Any]=None , A : List[str]=None ):
snake_case__ : Optional[int] = False
for key, mapped_key in MAPPING.items():
snake_case__ : Tuple = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case__ : Optional[int] = True
if "*" in mapped_key:
snake_case__ : List[Any] = name.split(A )[0].split('.' )[-2]
snake_case__ : Union[str, Any] = mapped_key.replace('*' , A )
if "weight_g" in name:
snake_case__ : Tuple = 'weight_g'
elif "weight_v" in name:
snake_case__ : List[str] = 'weight_v'
elif "bias" in name:
snake_case__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[int] = 'weight'
else:
snake_case__ : str = None
if hf_dict is not None:
rename_dict(A , A , A , A , A )
else:
set_recursively(A , A , A , A , A )
return is_used
return is_used
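# Hedged sketch (toy names, assumed) of the "*" wildcard fill-in above: the
# layer index is recovered from the fairseq name and substituted into the HF key.
_name = "encoder.layers.3.self_attn.k_proj.weight"
_key = "self_attn.k_proj"
_mapped = "encoder.layers.*.attention.k_proj"
_layer_index = _name.split(_key)[0].split(".")[-2]  # -> "3"
assert _mapped.replace("*", _layer_index) == "encoder.layers.3.attention.k_proj"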
def lowercase_ (A : Optional[Any] , A : Dict , A : Optional[int] ):
snake_case__ : Dict = []
snake_case__ : Tuple = fairseq_model.state_dict()
snake_case__ : str = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : str = False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == 'group' , )
snake_case__ : Any = True
else:
snake_case__ : Dict = load_wavaveca_layer(A , A , A )
if not is_used:
unused_weights.append(A )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase_ (A : Dict , A : Optional[Any] , A : Tuple , A : str , A : List[str] ):
snake_case__ : List[Any] = full_name.split('conv_layers.' )[-1]
snake_case__ : List[str] = name.split('.' )
snake_case__ : List[Any] = int(items[0] )
snake_case__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : str = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : str = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(A )
@torch.no_grad()
def lowercase_ (A : Union[str, Any] , A : str , A : Tuple=None , A : List[str]=None , A : Any=True , A : Optional[int]=False ):
if config_path is not None:
snake_case__ : List[Any] = WavaVecaConfig.from_pretrained(A )
else:
snake_case__ : List[Any] = WavaVecaConfig()
if is_seq_class:
snake_case__ : Dict = read_txt_into_dict(A )
snake_case__ : Any = idalabel
snake_case__ : Union[str, Any] = WavaVecaForSequenceClassification(A )
snake_case__ : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
feature_extractor.save_pretrained(A )
elif is_finetuned:
if dict_path:
snake_case__ : str = Dictionary.load(A )
# important: overwrite the bos & pad token ids, since the CTC blank symbol is
# <pad> and not <s> as in fairseq
snake_case__ : List[str] = target_dict.pad_index
snake_case__ : Optional[int] = target_dict.bos_index
snake_case__ : Optional[int] = target_dict.eos_index
snake_case__ : List[Any] = len(target_dict.symbols )
snake_case__ : str = os.path.join(A , 'vocab.json' )
if not os.path.isdir(A ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A ) )
return
os.makedirs(A , exist_ok=A )
snake_case__ : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case__ : Optional[Any] = 0
snake_case__ : Union[str, Any] = 1
with open(A , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(A , A )
snake_case__ : List[Any] = WavaVecaCTCTokenizer(
A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A , )
snake_case__ : str = True if config.feat_extract_norm == 'layer' else False
snake_case__ : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
snake_case__ : Union[str, Any] = WavaVecaProcessor(feature_extractor=A , tokenizer=A )
processor.save_pretrained(A )
snake_case__ : str = WavaVecaForCTC(A )
else:
snake_case__ : int = WavaVecaForPreTraining(A )
if is_finetuned or is_seq_class:
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
snake_case__ : Tuple = argparse.Namespace(task='audio_pretraining' )
snake_case__ : str = fairseq.tasks.setup_task(A )
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A )
snake_case__ : List[Any] = model[0].eval()
recursively_load_weights(A , A , not is_finetuned )
hf_wavavec.save_pretrained(A )
if __name__ == "__main__":
a_ :List[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
a_ :str = parser.parse_args()
a_ :Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 277
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
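# Worked check (toy coefficients) of Horner's rule above:
# ((3 * x + 2) * x + 1) at x = 2 gives ((3 * 2 + 2) * 2 + 1) = 17.
assert horner((1.0, 2.0, 3.0), 2.0) == 17.0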
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 17
|
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
_UpperCAmelCase : Optional[Any] = 10
_UpperCAmelCase : int = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
_UpperCAmelCase : List[str] = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(a_ ) ),
}, features=a_, )
return dataset
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int], a_: Dict ):
_UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=a_ )
return filename
# FILE_CONTENT + files
__a = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Dict ):
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "file.txt"
_UpperCAmelCase : Tuple = FILE_CONTENT
with open(a_, "w" ) as f:
f.write(a_ )
return filename
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
import bza
_UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
_UpperCAmelCase : Optional[int] = bytes(a_, "utf-8" )
with bza.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
import gzip
_UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
_UpperCAmelCase : Any = bytes(a_, "utf-8" )
with gzip.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: str ):
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
_UpperCAmelCase : str = bytes(a_, "utf-8" )
with lza.frame.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int, a_: Any ):
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
with pyazr.SevenZipFile(a_, "w" ) as archive:
archive.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any, a_: List[str] ):
import tarfile
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(a_, "w" ) as f:
f.add(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int ):
import lzma
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
_UpperCAmelCase : List[str] = bytes(a_, "utf-8" )
with lzma.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Dict, a_: Tuple ):
import zipfile
_UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int] ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_UpperCAmelCase : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
_UpperCAmelCase : int = bytes(a_, "utf-8" )
with zstd.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int] ):
_UpperCAmelCase : List[str] = tmp_path_factory.mktemp("data" ) / "file.xml"
_UpperCAmelCase : Tuple = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(a_, "w" ) as f:
f.write(a_ )
return filename
__a = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__a = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__a = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__a = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__a = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : str = datasets.Dataset.from_dict(a_ )
_UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: str ):
_UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(a_ ) ) as con:
_UpperCAmelCase : List[Any] = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(a_, "w", newline="" ) as f:
_UpperCAmelCase : Dict = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(a_, "w", newline="" ) as f:
_UpperCAmelCase : Optional[int] = csv.DictWriter(a_, fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: str, a_: str ):
import bza
_UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(a_, "rb" ) as f:
_UpperCAmelCase : Any = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(a_, "wb" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int], a_: Dict, a_: Optional[int] ):
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[str], a_: Union[str, Any], a_: int ):
_UpperCAmelCase : int = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(csv_path.replace(".csv", ".CSV" ) ) )
f.write(a_, arcname=os.path.basename(csva_path.replace(".csv", ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any, a_: Union[str, Any], a_: Tuple ):
_UpperCAmelCase : Any = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Tuple ):
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
_UpperCAmelCase : Dict = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(a_, "wb" ) as f:
_UpperCAmelCase : Tuple = pq.ParquetWriter(a_, schema=a_ )
_UpperCAmelCase : Tuple = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(a_ ) )] for k in DATA[0]}, schema=a_ )
writer.write_table(a_ )
writer.close()
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_UpperCAmelCase : str = {"data": DATA}
with open(a_, "w" ) as f:
json.dump(a_, a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
_UpperCAmelCase : Dict = {"data": DATA_DICT_OF_LISTS}
with open(a_, "w" ) as f:
json.dump(a_, a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int ):
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(a_, "w" ) as f:
for item in DATA:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Tuple ):
_UpperCAmelCase : Any = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(a_, "w" ) as f:
for item in DATA:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : int = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(a_, "w" ) as f:
for item in DATA_312:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any] ):
_UpperCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(a_, "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(a_ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any], a_: Any ):
import gzip
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(a_, "rb" ) as orig_file:
with gzip.open(a_, "wb" ) as zipped_file:
zipped_file.writelines(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any], a_: Tuple ):
import gzip
_UpperCAmelCase : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(a_, "rb" ) as orig_file:
with gzip.open(a_, "wb" ) as zipped_file:
zipped_file.writelines(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Dict, a_: List[Any], a_: Union[str, Any] ):
_UpperCAmelCase : Tuple = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any], a_: Optional[int], a_: Optional[Any], a_: Dict ):
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[Any], a_: Optional[int], a_: List[str] ):
_UpperCAmelCase : Dict = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[Any], a_: List[Any], a_: str ):
_UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(a_, "w" ) as f:
f.add(a_, arcname=os.path.basename(a_ ) )
f.add(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[str], a_: List[Any], a_: Tuple, a_: Dict ):
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(a_, "w" ) as f:
f.add(a_, arcname=os.path.join("nested", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: List[str] ):
_UpperCAmelCase : List[str] = ["0", "1", "2", "3"]
_UpperCAmelCase : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(a_, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Union[str, Any] ):
_UpperCAmelCase : Dict = ["0", "1", "2", "3"]
_UpperCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(a_, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : int = ["0", "1", "2", "3"]
_UpperCAmelCase : str = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(a_, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any], a_: Any, a_: Union[str, Any] ):
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[int], a_: List[Any], a_: List[Any] ):
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
f.write(a_, arcname=os.path.join("main_dir", os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Any, a_: str, a_: Tuple ):
_UpperCAmelCase : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename("unsupported.ext" ) )
f.write(a_, arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Optional[Any] ):
_UpperCAmelCase : List[str] = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
_UpperCAmelCase : str = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(a_, "w", encoding="utf-8" ) as f:
f.write(a_ )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
return os.path.join("tests", "features", "data", "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
return os.path.join("tests", "features", "data", "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: int, a_: Optional[Any] ):
_UpperCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(a_, "w" ) as f:
f.write(a_, arcname=os.path.basename(a_ ) )
f.write(a_, arcname=os.path.basename(a_ ).replace(".jpg", "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: Tuple ):
_UpperCAmelCase : Optional[Any] = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt", "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt", "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt", "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt", "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt", "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
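# Hedged usage sketch (hypothetical test, not part of this conftest): pytest
# injects a session fixture by parameter name, so a test naming the CSV fixture
# above would receive the prepared temporary file path.
def test_csv_has_expected_header(csv_path):
    with open(csv_path, newline="") as f:
        assert f.readline().strip() == "col_1,col_2,col_3"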
| 17
| 1
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( __snake_case : Any , __snake_case : Optional[int] , __snake_case : Optional[Any]=None ):
'''simple docstring'''
assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
lowercase = nn.Parameter(snake_case__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
lowercase = nn.Parameter(snake_case__ )
def _SCREAMING_SNAKE_CASE ( __snake_case : str , __snake_case : Union[str, Any] , __snake_case : str ):
'''simple docstring'''
lowercase = np.asarray(weights[0] )
lowercase = np.asarray(weights[1] )
lowercase = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(snake_case__ ).view(-1 , snake_case__ ).contiguous().transpose(0 , 1 ) , )
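# Hedged shape sketch (toy dims, assumed) of the transpose + view calls above:
# a 3-D trax weight collapses into the 2-D matrix a torch linear layer expects.
_w = torch.randn(8, 2, 4)  # (hidden, heads, head_dim), toy sizes
_flat = _w.transpose(1, 2).contiguous().view(-1, 8)
assert _flat.shape == (8, 8)  # (heads * head_dim, hidden)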
def _SCREAMING_SNAKE_CASE ( __snake_case : int , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
lowercase = np.asarray(weights[0] )
lowercase = np.asarray(weights[1] )
lowercase = np.asarray(weights[2] )
lowercase = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(snake_case__ ).view(-1 , snake_case__ ).contiguous().transpose(0 , 1 ) , )
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[Any] ):
'''simple docstring'''
lowercase = weights[0][0][0]
lowercase = np.asarray(layer_norm_a[0] )
lowercase = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(snake_case__ ) , torch.tensor(snake_case__ ) , )
# lsh weights + output
lowercase = weights[0][1]
if len(snake_case__ ) < 4:
set_layer_weights_in_torch_lsh(snake_case__ , torch_block.attention , snake_case__ )
else:
set_layer_weights_in_torch_local(snake_case__ , torch_block.attention , snake_case__ )
# intermediate weights
lowercase = weights[2][0][1][2]
# Chunked Feed Forward
if len(snake_case__ ) == 4:
lowercase = intermediate_weights[2]
# layernorm 2
lowercase = np.asarray(intermediate_weights[0][0] )
lowercase = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(snake_case__ ) , torch.tensor(snake_case__ ) , )
# intermediate dense
lowercase = np.asarray(intermediate_weights[1][0] )
lowercase = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(snake_case__ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case__ ) , )
# intermediate out
lowercase = np.asarray(intermediate_weights[4][0] )
lowercase = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(snake_case__ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case__ ) , )
def _SCREAMING_SNAKE_CASE ( __snake_case : str , __snake_case : Any , __snake_case : Tuple ):
'''simple docstring'''
lowercase = torch_model.reformer
# word embeds
lowercase = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(snake_case__ ) , )
if isinstance(weights[3] , snake_case__ ):
lowercase = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowercase = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'{position_embeddings[emb_idx]} emb does not match'
lowercase = nn.Parameter(torch.tensor(snake_case__ ) )
lowercase = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
snake_case__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowercase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(snake_case__ , snake_case__ , snake_case__ )
# output layer norm
lowercase = np.asarray(weights[7][0] )
lowercase = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(snake_case__ ) , torch.tensor(snake_case__ ) , )
# output embeddings
lowercase = np.asarray(weights[9][0] )
lowercase = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(snake_case__ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case__ ) , )
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] ):
'''simple docstring'''
lowercase = ReformerConfig.from_json_file(snake_case__ )
print(f'Building PyTorch model from configuration: {config}' )
lowercase = ReformerModelWithLMHead(snake_case__ )
with open(snake_case__ , 'rb' ) as f:
lowercase = pickle.load(snake_case__ )['weights']
set_model_weights_in_torch(snake_case__ , snake_case__ , config.hidden_size )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case__ )
if __name__ == "__main__":
_UpperCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_UpperCamelCase : Tuple = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 220
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __UpperCAmelCase :
def __init__( self: Any , UpperCAmelCase_: int , UpperCAmelCase_: Optional[int]=13 , UpperCAmelCase_: str=7 , UpperCAmelCase_: int=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Dict=True , UpperCAmelCase_: Any=True , UpperCAmelCase_: Tuple=99 , UpperCAmelCase_: Optional[Any]=32 , UpperCAmelCase_: Optional[int]=2 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Tuple=37 , UpperCAmelCase_: Union[str, Any]="gelu" , UpperCAmelCase_: List[str]=0.1 , UpperCAmelCase_: int=0.1 , UpperCAmelCase_: str=512 , UpperCAmelCase_: Union[str, Any]=16 , UpperCAmelCase_: List[Any]=2 , UpperCAmelCase_: str=0.02 , UpperCAmelCase_: int=False , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: Optional[Any]="None" , UpperCAmelCase_: Optional[int]=3 , UpperCAmelCase_: Any=4 , UpperCAmelCase_: Optional[int]=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = relative_attention
_SCREAMING_SNAKE_CASE = position_biased_input
_SCREAMING_SNAKE_CASE = pos_att_type
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str] , UpperCAmelCase_: Tuple , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForMaskedLM(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Any , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForSequenceClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFDebertaVaForTokenClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: Any , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaForQuestionAnswering(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : int = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__snake_case : Union[str, Any] = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case : Dict = False
__snake_case : Optional[Any] = False
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __UpperCAmelCase (unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
pass
@slow
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 )
| 306
| 0
|
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : str = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
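# Hedged spot check of the mapping above (assumes "BertTokenizer" is among the
# convertible names): each slow class name is paired with its "...Fast" class.
assert getattr(transformers, "BertTokenizer" + "Fast") is transformers.BertTokenizerFast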
def A_( A : Optional[Any] , A : Union[str, Any] , A : List[str] , A : List[Any]):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.''')
if tokenizer_name is None:
UpperCamelCase = TOKENIZER_CLASSES
else:
UpperCamelCase = {tokenizer_name: getattr(A , tokenizer_name + 'Fast')}
logger.info(f'''Loading tokenizer classes: {tokenizer_names}''')
for tokenizer_name in tokenizer_names:
UpperCamelCase = TOKENIZER_CLASSES[tokenizer_name]
UpperCamelCase = True
if checkpoint_name is None:
UpperCamelCase = list(tokenizer_class.max_model_input_sizes.keys())
else:
UpperCamelCase = [checkpoint_name]
logger.info(f'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''')
for checkpoint in checkpoint_names:
logger.info(f'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''')
# Load tokenizer
UpperCamelCase = tokenizer_class.from_pretrained(A , force_download=A)
# Save fast tokenizer
logger.info(f'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''')
# For organization names we create sub-directories
if "/" in checkpoint:
UpperCamelCase , UpperCamelCase = checkpoint.split('/')
UpperCamelCase = os.path.join(A , A)
elif add_prefix:
UpperCamelCase = checkpoint
UpperCamelCase = dump_path
else:
UpperCamelCase = None
UpperCamelCase = dump_path
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
UpperCamelCase = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
UpperCamelCase = file_path.split(A)[-1][0]
if next_char == "/":
UpperCamelCase = os.path.join(A , A)
UpperCamelCase = None
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''')
UpperCamelCase = tokenizer.save_pretrained(
A , legacy_format=A , filename_prefix=A)
logger.info(f'''=> File names {file_names}''')
for file_name in file_names:
if not file_name.endswith('tokenizer.json'):
os.remove(A)
logger.info(f'''=> removing {file_name}''')
if __name__ == "__main__":
lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
lowerCAmelCase : Any = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 251
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
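# Minimal sanity sketch of the attribute aliasing above (illustrative only):
# attribute_map lets generic config names resolve to the GPT-specific fields.
_cfg = OpenAIGPTConfig()
assert _cfg.hidden_size == _cfg.n_embd == 768
assert _cfg.num_hidden_layers == _cfg.n_layer == 12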
| 251
| 1
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    return Dataset.from_dict(data_dict)
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 250
|
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
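# Quick sanity sketch (toy dice): two 2-sided dice produce totals 2, 3, 3, 4,
# so the frequency table indexed by total is [0, 0, 1, 2, 1].
assert total_frequency_distribution(sides_number=2, dice_number=2) == [0, 0, 1, 2, 1]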
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9
    )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6
    )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 242
| 0
|
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCamelCase :Any = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
lowerCamelCase :List[str] = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
lowerCamelCase :Dict = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCamelCase :Tuple = F"down_blocks.{i}.resnets.{j}."
lowerCamelCase :Union[str, Any] = F"input_blocks.{3*i + j + 1}.0."
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCamelCase :Any = F"down_blocks.{i}.attentions.{j}."
lowerCamelCase :List[Any] = F"input_blocks.{3*i + j + 1}.1."
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCamelCase :Union[str, Any] = F"up_blocks.{i}.resnets.{j}."
lowerCamelCase :Union[str, Any] = F"output_blocks.{3*i + j}.0."
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCamelCase :List[Any] = F"up_blocks.{i}.attentions.{j}."
lowerCamelCase :Any = F"output_blocks.{3*i + j}.1."
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCamelCase :str = F"down_blocks.{i}.downsamplers.0.conv."
lowerCamelCase :Any = F"input_blocks.{3*(i+1)}.0.op."
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCamelCase :Optional[int] = F"up_blocks.{i}.upsamplers.0."
lowerCamelCase :int = F"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCamelCase :Any = '''mid_block.attentions.0.'''
lowerCamelCase :Union[str, Any] = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCamelCase :List[str] = F"mid_block.resnets.{j}."
lowerCamelCase :Any = F"middle_block.{2*j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
A_ : int = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
A_ : List[Any] = v.replace(lowerCamelCase__ , lowerCamelCase__ )
A_ : int = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
A_ : int = v.replace(lowerCamelCase__ , lowerCamelCase__ )
A_ : List[Any] = v
A_ : str = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
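# Hedged toy sketch (made-up key) of the two-pass substring rewriting above,
# turning an HF Diffusers UNet key into its stable-diffusion counterpart.
_toy = {"down_blocks.0.resnets.0.norm1.weight": "down_blocks.0.resnets.0.norm1.weight"}
for _k, _v in list(_toy.items()):
    _v = _v.replace("norm1", "in_layers.0")  # resnet sub-part pass
    _toy[_k] = _v.replace("down_blocks.0.resnets.0.", "input_blocks.1.0.")  # layer-prefix pass
assert _toy["down_blocks.0.resnets.0.norm1.weight"] == "input_blocks.1.0.in_layers.0.weight"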
# ================#
# VAE Conversion #
# ================#
lowerCamelCase :Optional[Any] = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCamelCase :Optional[Any] = F"encoder.down_blocks.{i}.resnets.{j}."
lowerCamelCase :Optional[int] = F"encoder.down.{i}.block.{j}."
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCamelCase :int = F"down_blocks.{i}.downsamplers.0."
lowerCamelCase :Optional[Any] = F"down.{i}.downsample."
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCamelCase :Dict = F"up_blocks.{i}.upsamplers.0."
lowerCamelCase :Optional[int] = F"up.{3-i}.upsample."
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCamelCase :Tuple = F"decoder.up_blocks.{i}.resnets.{j}."
lowerCamelCase :Dict = F"decoder.up.{3-i}.block.{j}."
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCamelCase :Union[str, Any] = F"mid_block.resnets.{i}."
lowerCamelCase :Dict = F"mid.block_{i+1}."
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCamelCase :Any = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def a ( lowerCamelCase__ ):
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
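# Toy check (assumed dims) of the reshape above: a 2-D linear projection gains
# trailing 1x1 spatial dims so it fits stable diffusion's conv-style layout.
_w2 = torch.randn(4, 4)
assert _w2.reshape(*_w2.shape, 1, 1).shape == (4, 4, 1, 1)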
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
A_ : Optional[Any] = v.replace(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[Any] = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
A_ : List[str] = v.replace(lowerCamelCase__ , lowerCamelCase__ )
A_ : Dict = v
A_ : Optional[int] = {v: vae_state_dict[k] for k, v in mapping.items()}
A_ : Dict = ["""q""", """k""", """v""", """proj_out"""]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f'mid.attn_1.{weight_name}.weight' in k:
print(f'Reshaping {k} for SD format' )
A_ : str = reshape_weight_for_sd(lowerCamelCase__ )
return new_state_dict
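# Sanity check for the 1x1-conv reshape applied above to the attention weights
# (w.reshape(*w.shape, 1, 1)); assumes torch is imported at the top of this script.
_w = torch.randn(512, 512)
assert _w.reshape(*_w.shape, 1, 1).shape == (512, 512, 1, 1)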
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCamelCase :Dict = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
lowerCamelCase :Dict = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCamelCase :Union[str, Any] = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCamelCase :Optional[Any] = {'''q''': 0, '''k''': 1, '''v''': 2}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = {}
A_ : Optional[int] = {}
A_ : Optional[Any] = {}
for k, v in text_enc_dict.items():
if (
k.endswith(""".self_attn.q_proj.weight""" )
or k.endswith(""".self_attn.k_proj.weight""" )
or k.endswith(""".self_attn.v_proj.weight""" )
):
A_ : Optional[int] = k[: -len(""".q_proj.weight""" )]
A_ : int = k[-len("""q_proj.weight""" )]
if k_pre not in capture_qkv_weight:
A_ : Optional[int] = [None, None, None]
A_ : List[str] = v
continue
if (
k.endswith(""".self_attn.q_proj.bias""" )
or k.endswith(""".self_attn.k_proj.bias""" )
or k.endswith(""".self_attn.v_proj.bias""" )
):
A_ : Optional[int] = k[: -len(""".q_proj.bias""" )]
A_ : str = k[-len("""q_proj.bias""" )]
if k_pre not in capture_qkv_bias:
A_ : Union[str, Any] = [None, None, None]
A_ : str = v
continue
        A_ : Optional[Any] = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowerCamelCase__ )
A_ : Optional[int] = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        A_ : Tuple = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowerCamelCase__ )
A_ : Dict = torch.cat(lowerCamelCase__ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        A_ : Any = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowerCamelCase__ )
A_ : Union[str, Any] = torch.cat(lowerCamelCase__ )
return new_state_dict
def a ( lowerCamelCase__ ):
'''simple docstring'''
return text_enc_dict
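# Illustration of the q/k/v fusion in the v2 text-encoder path above: three
# (dim, dim) projection weights are stacked with torch.cat into one (3*dim, dim)
# in_proj tensor. Toy sizes only.
_q, _k, _v = (torch.zeros(4, 4) for _ in range(3))
assert torch.cat([_q, _k, _v]).shape == (12, 4)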
if __name__ == "__main__":
lowerCamelCase :Tuple = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
lowerCamelCase :Dict = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCamelCase :str = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
lowerCamelCase :int = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
lowerCamelCase :Any = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if available, otherwise fall back to the PyTorch checkpoints
if osp.exists(unet_path):
lowerCamelCase :str = load_file(unet_path, device='''cpu''')
else:
lowerCamelCase :Dict = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
lowerCamelCase :Optional[int] = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
lowerCamelCase :Dict = load_file(vae_path, device='''cpu''')
else:
lowerCamelCase :Any = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
lowerCamelCase :str = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
lowerCamelCase :List[Any] = load_file(text_enc_path, device='''cpu''')
else:
lowerCamelCase :Tuple = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
lowerCamelCase :Optional[int] = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
lowerCamelCase :Tuple = convert_unet_state_dict(unet_state_dict)
lowerCamelCase :str = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCamelCase :Optional[Any] = convert_vae_state_dict(vae_state_dict)
lowerCamelCase :int = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCamelCase :Dict = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCamelCase :Optional[Any] = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
lowerCamelCase :List[Any] = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCamelCase :Dict = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
lowerCamelCase :Union[str, Any] = convert_text_enc_state_dict(text_enc_dict)
lowerCamelCase :str = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCamelCase :int = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCamelCase :int = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCamelCase :Union[str, Any] = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 135
|
'''simple docstring'''
# fmt: off
lowerCamelCase :Any = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowerCamelCase :Any = {value: key for key, value in MORSE_CODE_DICT.items()}
def a ( lowerCamelCase__ ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def a ( lowerCamelCase__ ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def a ( ):
'''simple docstring'''
A_ : List[str] = """Morse code here!"""
print(lowerCamelCase__ )
A_ : str = encrypt(lowerCamelCase__ )
print(lowerCamelCase__ )
A_ : Dict = decrypt(lowerCamelCase__ )
print(lowerCamelCase__ )
if __name__ == "__main__":
main()
| 135
| 1
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] ) -> List[str]:
'''simple docstring'''
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(snake_case__ ) for s in shape] )}.npy"""
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[Any]=0 , snake_case__ : Any=(4, 4, 64, 64) , snake_case__ : List[Any]=False ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
snake_case : Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(snake_case__ , snake_case__ ) ) , dtype=snake_case__ )
return image
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple=False , snake_case__ : List[Any]="CompVis/stable-diffusion-v1-4" ) -> List[Any]:
'''simple docstring'''
snake_case : List[str] = jnp.bfloataa if fpaa else jnp.floataa
snake_case : str = "bf16" if fpaa else None
snake_case , snake_case : Optional[int] = FlaxUNetaDConditionModel.from_pretrained(
snake_case__ , subfolder="unet" , dtype=snake_case__ , revision=snake_case__ )
return model, params
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Union[str, Any]=0 , snake_case__ : Union[str, Any]=(4, 77, 7_68) , snake_case__ : Dict=False ) -> List[str]:
'''simple docstring'''
snake_case : Any = jnp.bfloataa if fpaa else jnp.floataa
snake_case : Any = jnp.array(load_hf_numpy(self.get_file_format(snake_case__ , snake_case__ ) ) , dtype=snake_case__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 10_00, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Dict ) -> List[str]:
'''simple docstring'''
snake_case , snake_case : List[str] = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=snake_case__ )
snake_case : Union[str, Any] = self.get_latents(snake_case__ , fpaa=snake_case__ )
snake_case : List[str] = self.get_encoder_hidden_states(snake_case__ , fpaa=snake_case__ )
snake_case : Dict = model.apply(
{"params": params} , snake_case__ , jnp.array(snake_case__ , dtype=jnp.intaa ) , encoder_hidden_states=snake_case__ , ).sample
assert sample.shape == latents.shape
snake_case : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case : Optional[int] = jnp.array(snake_case__ , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(snake_case__ , snake_case__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 10_00, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Tuple ) -> str:
'''simple docstring'''
snake_case , snake_case : List[Any] = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=snake_case__ )
snake_case : List[str] = self.get_latents(snake_case__ , shape=(4, 4, 96, 96) , fpaa=snake_case__ )
snake_case : Union[str, Any] = self.get_encoder_hidden_states(snake_case__ , shape=(4, 77, 10_24) , fpaa=snake_case__ )
snake_case : Optional[int] = model.apply(
{"params": params} , snake_case__ , jnp.array(snake_case__ , dtype=jnp.intaa ) , encoder_hidden_states=snake_case__ , ).sample
assert sample.shape == latents.shape
snake_case : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case : Dict = jnp.array(snake_case__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(snake_case__ , snake_case__ , atol=1e-2 )
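# Self-contained illustration of the tolerance check used in both tests above:
# values within atol=1e-2 compare equal under jnp.allclose.
if is_flax_available():
    _a = jnp.zeros(8)
    assert jnp.allclose(_a, _a + 5e-3, atol=1e-2)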
| 59
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
a_ = (3, 9, -11, 0, 7, 5, 1, -1)
a_ = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class lowercase__ :
a_ =42
a_ =42
class lowercase__ :
def __init__( self , __UpperCAmelCase )-> None:
'''simple docstring'''
lowerCAmelCase__ = None
for i in sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ):
lowerCAmelCase__ = Node(__UpperCAmelCase , self.head )
def __iter__( self )-> Iterator[int]:
'''simple docstring'''
lowerCAmelCase__ = self.head
while node:
yield node.data
lowerCAmelCase__ = node.next_node
def __len__( self )-> int:
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self )-> str:
'''simple docstring'''
return " -> ".join([str(__UpperCAmelCase ) for node in self] )
def _a ( UpperCamelCase_ : SortedLinkedList , UpperCamelCase_ : SortedLinkedList ) -> SortedLinkedList:
"""simple docstring"""
return SortedLinkedList(list(UpperCamelCase_ ) + list(UpperCamelCase_ ) )
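# Behavior sketch for the merge above: concatenating the two iterables and letting
# the constructor re-sort them yields one sorted sequence (plain-list analogue).
assert sorted(list((3, 1)) + list((2, 0))) == [0, 1, 2, 3]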
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 340
| 0
|
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
__lowerCAmelCase = '1'
__lowerCAmelCase = '0'
__lowerCAmelCase = '1'
__lowerCAmelCase = ort.SessionOptions()
__lowerCAmelCase = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
__lowerCAmelCase = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
__lowerCAmelCase = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
__lowerCAmelCase = ort.RunOptions()
__lowerCAmelCase = 128
__lowerCAmelCase = 1
__lowerCAmelCase = np.ones((batch, sequence), dtype=np.intaa)
__lowerCAmelCase = np.ones((batch, sequence), dtype=np.intaa)
__lowerCAmelCase = np.ones((batch, sequence), dtype=np.intaa)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
__lowerCAmelCase = time.time()
__lowerCAmelCase = 2_000
__lowerCAmelCase = {}
for iter in range(max_iters):
__lowerCAmelCase = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1_000 / max_iters))
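# The provider list above is only a preference; onnxruntime silently falls back to
# whatever is actually installed. A quick way to inspect the real options:
print('Available providers:', ort.get_available_providers())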
| 270
|
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_snake_case = hf_hub_url(repo_id=_SCREAMING_SNAKE_CASE , path=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE )
assert url == f"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(_SCREAMING_SNAKE_CASE )}"""
| 270
| 1
|
"""simple docstring"""
from collections.abc import Sequence
def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(UpperCamelCase_))
def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float:
'''simple docstring'''
__lowercase = 0.0
for coeff in reversed(UpperCamelCase_):
__lowercase = result * x + coeff
return result
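# Worked arithmetic for the demo below: with coefficients (0.0, 0.0, 5.0, 9.3, 7.0)
# and x = 10.0, both functions compute 5*10**2 + 9.3*10**3 + 7*10**4 = 79800.0;
# Horner's rule reaches the same value without recomputing powers of x.
assert abs(sum(c * 10.0**i for i, c in enumerate((0.0, 0.0, 5.0, 9.3, 7.0))) - 79800.0) < 1e-6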
if __name__ == "__main__":
_a = (0.0, 0.0, 5.0, 9.3, 7.0)
_a = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 17
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=UpperCAmelCase__, )
assert hasattr(self, "env" )
def _lowercase ( self : str, UpperCAmelCase__ : List[Any] ):
# configuration for running training on smdistributed Model Parallel
__lowercase = {
"enabled": True,
"processes_per_host": 8,
}
__lowercase = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
__lowercase = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
__lowercase = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""", instance_count=UpperCAmelCase__, instance_type=self.instance_type, debugger_hook_config=UpperCAmelCase__, hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 5_0_0,
}, metric_definitions=self.env.metric_definitions, distribution=UpperCAmelCase__, py_version="py36", )
def _lowercase ( self : Tuple, UpperCAmelCase__ : int ):
TrainingJobAnalytics(UpperCAmelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _lowercase ( self : str, UpperCAmelCase__ : Union[str, Any] ):
# create estimator
__lowercase = self.create_estimator(UpperCAmelCase__ )
# run training
estimator.fit()
# result dataframe
__lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
__lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowercase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""", "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, UpperCAmelCase__ )
| 17
| 1
|
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> str:
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
_lowerCAmelCase =tmp_path / """cache"""
_lowerCAmelCase ={"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCAmelCase =TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
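# Minimal usage sketch of the reader under test (hypothetical path; mirrors the
# call shape exercised throughout this file):
#   ds = TextDatasetReader("data.txt", cache_dir="/tmp/cache").read()
#   -> a datasets.Dataset with a single "text" column, one row per input line.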
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
_lowerCAmelCase =tmp_path / """cache"""
_lowerCAmelCase ={"""text""": """string"""}
_lowerCAmelCase =features.copy() if features else default_expected_features
_lowerCAmelCase =(
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCAmelCase =TextDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
_lowerCAmelCase =tmp_path / """cache"""
_lowerCAmelCase ={"""text""": """string"""}
_lowerCAmelCase =TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , split=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
if issubclass(__UpperCamelCase , __UpperCamelCase ):
_lowerCAmelCase =text_path
elif issubclass(__UpperCamelCase , __UpperCamelCase ):
_lowerCAmelCase =[text_path]
_lowerCAmelCase =tmp_path / """cache"""
_lowerCAmelCase ={"""text""": """string"""}
_lowerCAmelCase =TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=("train",) ) -> List[str]:
assert isinstance(__UpperCamelCase , __UpperCamelCase )
for split in splits:
_lowerCAmelCase =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
_lowerCAmelCase =tmp_path / """cache"""
_lowerCAmelCase ={"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCAmelCase =TextDatasetReader({"""train""": text_path} , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_text_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple:
_lowerCAmelCase =tmp_path / """cache"""
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
_lowerCAmelCase ={"""text""": """string"""}
_lowerCAmelCase =features.copy() if features else default_expected_features
_lowerCAmelCase =(
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCAmelCase =TextDatasetReader({"""train""": text_path} , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
if split:
_lowerCAmelCase ={split: text_path}
else:
_lowerCAmelCase ="""train"""
_lowerCAmelCase ={"""train""": text_path, """test""": text_path}
_lowerCAmelCase =tmp_path / """cache"""
_lowerCAmelCase ={"""text""": """string"""}
_lowerCAmelCase =TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_datasetdict(__UpperCamelCase , __UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 341
|
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
lowerCamelCase = 1
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
def _lowerCAmelCase ( self ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
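# Self-contained analogue of the copy() pattern above (with the deepcopy applied
# to each value v, as intended): every field is deep-copied into a fresh instance.
@dataclass
class _ToyConfig:
    retries: int = 1

    def copy(self) -> "_ToyConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

assert _ToyConfig(retries=3).copy().retries == 3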
| 341
| 1
|
'''simple docstring'''
import math
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
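# Spot check of the 6k +/- 1 fact the loop above relies on: every prime p > 3
# satisfies p % 6 in {1, 5}.
assert all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 29, 97))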
def lowercase__( __UpperCamelCase: float = 0.1 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : List[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 ,(j + 2) * (j + 2) ,j + 1 ):
            primes += is_prime(i )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 251
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 251
| 1
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__SCREAMING_SNAKE_CASE : int = re.compile(R'\s+')
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(__lowercase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def snake_case (__lowercase ) -> Dict:
'''simple docstring'''
_snake_case : List[str] = [len(__lowercase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(__lowercase ), "line_max": max(__lowercase )}
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
_snake_case : List[str] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def snake_case (__lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def snake_case (__lowercase , __lowercase=5 ) -> str:
'''simple docstring'''
_snake_case : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_snake_case : int = example["content"].splitlines()
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def snake_case (__lowercase , __lowercase=5 , __lowercase=0.05 ) -> List[str]:
'''simple docstring'''
_snake_case : Dict = ["unit tests", "test file", "configuration file"]
_snake_case : Union[str, Any] = example["content"].splitlines()
_snake_case : Any = 0
_snake_case : Dict = 0
# first test
for _, line in zip(range(__lowercase ) , __lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_snake_case : List[str] = example["content"].count("\n" )
_snake_case : Tuple = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def snake_case (__lowercase ) -> Tuple:
'''simple docstring'''
_snake_case : int = ["def ", "class ", "for ", "while "]
_snake_case : Dict = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def snake_case (__lowercase , __lowercase=4 ) -> Any:
'''simple docstring'''
_snake_case : str = example["content"].splitlines()
_snake_case : Tuple = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def snake_case (__lowercase ) -> int:
'''simple docstring'''
_snake_case : Dict = tokenizer(example["content"] , truncation=__lowercase )["input_ids"]
_snake_case : Any = len(example["content"] ) / len(__lowercase )
return {"ratio": ratio}
def snake_case (__lowercase ) -> Optional[Any]:
'''simple docstring'''
_snake_case : int = {}
results.update(get_hash(__lowercase ) )
results.update(line_stats(__lowercase ) )
results.update(alpha_stats(__lowercase ) )
results.update(char_token_ratio(__lowercase ) )
results.update(is_autogenerated(__lowercase ) )
results.update(is_config_or_test(__lowercase ) )
results.update(has_no_keywords(__lowercase ) )
results.update(has_few_assignments(__lowercase ) )
return results
def snake_case (__lowercase , __lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
if not check_uniques(__lowercase , __lowercase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def snake_case (__lowercase ) -> Dict:
'''simple docstring'''
with open(__lowercase , "rb" ) as f_in:
with gzip.open(str(__lowercase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(__lowercase , __lowercase )
os.unlink(__lowercase )
# Settings
__SCREAMING_SNAKE_CASE : List[str] = HfArgumentParser(PreprocessingArguments)
__SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
if args.num_workers is None:
__SCREAMING_SNAKE_CASE : int = multiprocessing.cpu_count()
__SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__SCREAMING_SNAKE_CASE : Union[str, Any] = time.time()
__SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset(args.dataset_name, split='train')
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__SCREAMING_SNAKE_CASE : Tuple = time.time()
__SCREAMING_SNAKE_CASE : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__SCREAMING_SNAKE_CASE : str = set(ds.unique('hash'))
__SCREAMING_SNAKE_CASE : int = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__SCREAMING_SNAKE_CASE : str = time.time()
__SCREAMING_SNAKE_CASE : Dict = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__SCREAMING_SNAKE_CASE : List[str] = time.time()
__SCREAMING_SNAKE_CASE : Any = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(F'''Size of deduplicated dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__SCREAMING_SNAKE_CASE : Optional[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
__SCREAMING_SNAKE_CASE : Any = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
__SCREAMING_SNAKE_CASE : int = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__SCREAMING_SNAKE_CASE : Dict = str(data_dir / F'''file-{file_number+1:012}.json''')
__SCREAMING_SNAKE_CASE : List[Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
| 356
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__SCREAMING_SNAKE_CASE : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def snake_case (__lowercase ) -> str:
'''simple docstring'''
if "://" in dataset_path:
_snake_case : Tuple = dataset_path.split("://" )[1]
return dataset_path
def snake_case (__lowercase ) -> bool:
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def snake_case (__lowercase , __lowercase , __lowercase ) -> str:
'''simple docstring'''
_snake_case : Optional[int] = not is_remote_filesystem(__lowercase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__lowercase ) , fs._strip_protocol(__lowercase ) )
else:
fs.mv(__lowercase , __lowercase , recursive=__lowercase )
def snake_case () -> None:
'''simple docstring'''
if hasattr(fsspec.asyn , "reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_snake_case : List[str] = None
_snake_case : str = None
_snake_case : List[Any] = threading.Lock()
| 284
| 0
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowercase_ ( _lowerCamelCase: Any , _lowerCamelCase: List[str] , _lowerCamelCase: Tuple ) -> str:
'''simple docstring'''
__lowerCamelCase : Tuple = 1.5
__lowerCamelCase : Dict = int(factor * num_class_images )
__lowerCamelCase : Any = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_lowerCamelCase , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=_lowerCamelCase )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
__lowerCamelCase : Tuple = client.query(text=_lowerCamelCase )
if len(_lowerCamelCase ) >= factor * num_class_images or num_images > 1E4:
break
else:
__lowerCamelCase : int = int(factor * num_images )
__lowerCamelCase : Tuple = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=_lowerCamelCase , aesthetic_weight=0.1 , )
__lowerCamelCase : Tuple = 0
__lowerCamelCase : Optional[int] = 0
__lowerCamelCase : Any = tqdm(desc="downloading real regularization images" , total=_lowerCamelCase )
with open(F"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(F"""{class_data_dir}/urls.txt""" , "w" ) as fa, open(
F"""{class_data_dir}/images.txt""" , "w" ) as fa:
while total < num_class_images:
__lowerCamelCase : int = class_images[count]
count += 1
try:
__lowerCamelCase : Optional[int] = requests.get(images["url"] )
if img.status_code == 200:
__lowerCamelCase : Any = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowercase_ ( ) -> Tuple:
'''simple docstring'''
__lowerCamelCase : List[Any] = argparse.ArgumentParser("" , add_help=_lowerCamelCase )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=_lowerCamelCase , type=_lowerCamelCase )
parser.add_argument("--class_data_dir" , help="path to save images" , required=_lowerCamelCase , type=_lowerCamelCase )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=_lowerCamelCase )
return parser.parse_args()
if __name__ == "__main__":
__A = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 135
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
__A = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
__A = [ord(letter) for letter in string.ascii_lowercase]
__A = {ord(char) for char in VALID_CHARS}
__A = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowercase_ ( _lowerCamelCase: list[int] , _lowerCamelCase: tuple[int, ...] ) -> str | None:
'''simple docstring'''
__lowerCamelCase : str = ""
__lowerCamelCase : int
__lowerCamelCase : int
__lowerCamelCase : int
for keychar, cipherchar in zip(cycle(_lowerCamelCase ) , _lowerCamelCase ):
__lowerCamelCase : str = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(_lowerCamelCase )
return decoded
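# The brute-force search below works because XOR is its own inverse:
# (c ^ k) ^ k == c for any byte values.
assert all((c ^ 97) ^ 97 == c for c in range(256))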
def lowercase_ ( _lowerCamelCase: list[int] ) -> list[str]:
'''simple docstring'''
__lowerCamelCase : list[str] = []
for key in product(_lowerCamelCase , repeat=3 ):
__lowerCamelCase : Tuple = try_key(_lowerCamelCase , _lowerCamelCase )
if encoded is not None:
possibles.append(_lowerCamelCase )
return possibles
def lowercase_ ( _lowerCamelCase: list[str] , _lowerCamelCase: str ) -> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def lowercase_ ( _lowerCamelCase: str = "p059_cipher.txt" ) -> int:
'''simple docstring'''
__lowerCamelCase : list[int]
__lowerCamelCase : list[str]
__lowerCamelCase : str
__lowerCamelCase : str
__lowerCamelCase : str = Path(_lowerCamelCase ).parent.joinpath(_lowerCamelCase ).read_text(encoding="utf-8" )
__lowerCamelCase : Any = [int(_lowerCamelCase ) for number in data.strip().split("," )]
__lowerCamelCase : Any = filter_valid_chars(_lowerCamelCase )
for common_word in COMMON_WORDS:
__lowerCamelCase : Dict = filter_common_word(_lowerCamelCase , _lowerCamelCase )
if len(_lowerCamelCase ) == 1:
break
__lowerCamelCase : List[Any] = possibles[0]
return sum(ord(_lowerCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 135
| 1
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __magic_name__ ( A , A , A , A , A ) -> List[Any]:
for attribute in key.split('.' ):
snake_case = getattr(lowerCamelCase_ , lowerCamelCase_ )
if weight_type is not None:
snake_case = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __magic_name__ ( A , A ) -> int:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
snake_case = None
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
snake_case = True
elif name.split('.' )[0] == "proj":
snake_case = fairseq_model.proj
snake_case = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case = True
if "*" in mapped_key:
snake_case = name.split(lowerCamelCase_ )[0].split('.' )[-2]
snake_case = mapped_key.replace('*' , lowerCamelCase_ )
if "weight_g" in name:
snake_case = '''weight_g'''
elif "weight_v" in name:
snake_case = '''weight_v'''
elif "bias" in name:
snake_case = '''bias'''
elif "weight" in name:
snake_case = '''weight'''
else:
snake_case = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
continue
if not is_used:
unused_weights.append(lowerCamelCase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def __magic_name__ ( A , A , A , A , A ) -> str:
snake_case = full_name.split('conv_layers.' )[-1]
snake_case = name.split('.' )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase_ )
def __magic_name__ ( A ) -> List[str]:
snake_case = emb.weight.shape
snake_case = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ )
snake_case = emb.weight.data
return lin_layer
def __magic_name__ ( A ) -> List[str]:
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
snake_case = f.readlines()
snake_case = [line.split(' ' )[0] for line in lines]
snake_case = len(lowerCamelCase_ )
snake_case = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(lowerCamelCase_ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
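# Sketch of the vocab layout produced above: four special tokens first, then the
# dict-file words in order from index 4 (toy words, hypothetical file contents).
_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_vocab.update(dict(zip(["hello", "world"], range(4, 4 + 2))))
assert _vocab["hello"] == 4 and _vocab["world"] == 5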
@torch.no_grad()
def __magic_name__ ( A , A , A , A , A , A , A , ) -> List[str]:
snake_case = WavaVecaConfig.from_pretrained(lowerCamelCase_ )
snake_case = SpeechaTextaConfig.from_pretrained(
lowerCamelCase_ , vocab_size=lowerCamelCase_ , decoder_layers=lowerCamelCase_ , do_stable_layer_norm=lowerCamelCase_ )
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
snake_case = model[0].eval()
# set weights for wav2vec2 encoder
snake_case = WavaVecaModel(lowerCamelCase_ )
snake_case = recursively_load_weights_wavaveca(model.encoder , lowerCamelCase_ )
snake_case = SpeechaTextaForCausalLM(lowerCamelCase_ )
snake_case = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowerCamelCase_ )
# set output linear layer
unexpected_keys.remove('embed_out' )
snake_case = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
snake_case = SpeechEncoderDecoderModel(encoder=lowerCamelCase_ , decoder=lowerCamelCase_ )
snake_case = False
# add projection layer
snake_case = nn.Parameter(projection_layer.weight )
snake_case = nn.Parameter(projection_layer.bias )
snake_case = create_vocab_dict(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , 'vocab.json' ) , 'w' ) as fp:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
snake_case = SpeechaTextaTokenizer(os.path.join(lowerCamelCase_ , 'vocab.json' ) )
tokenizer.save_pretrained(lowerCamelCase_ )
snake_case = hf_wavavec.config.to_dict()
snake_case = tokenizer.pad_token_id
snake_case = tokenizer.bos_token_id
snake_case = tokenizer.eos_token_id
snake_case = '''speech_to_text_2'''
snake_case = '''wav2vec2'''
snake_case = SpeechEncoderDecoderConfig.from_dict(lowerCamelCase_ )
hf_wavavec.save_pretrained(lowerCamelCase_ )
feature_extractor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
lowerCAmelCase_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 356
|
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> list:
if len(A ) == 0:
return []
snake_case , snake_case = min(A ), max(A )
snake_case = int(max_value - min_value ) + 1
snake_case = [[] for _ in range(A )]
for i in my_list:
buckets[int(i - min_value )].append(A )
return [v for bucket in buckets for v in sorted(A )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 332
| 0
|
from heapq import heappop, heappush
import numpy as np
def __magic_name__ ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : tuple[int, int] , __lowerCAmelCase : tuple[int, int] , __lowerCAmelCase : bool , ) -> tuple[float | int, list[tuple[int, int]]]:
__lowerCamelCase , __lowerCamelCase = grid.shape
__lowerCamelCase = [-1, 1, 0, 0]
__lowerCamelCase = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__lowerCamelCase , __lowerCamelCase = [(0, source)], set()
__lowerCamelCase = np.full((rows, cols) , np.inf )
__lowerCamelCase = 0
__lowerCamelCase = np.empty((rows, cols) , dtype=__lowerCAmelCase )
__lowerCamelCase = None
while queue:
((__lowerCamelCase) , (__lowerCamelCase)) = heappop(__lowerCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__lowerCamelCase = []
while (x, y) != source:
path.append((x, y) )
__lowerCamelCase , __lowerCamelCase = predecessors[x, y]
path.append(__lowerCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__lowerCAmelCase ) ):
__lowerCamelCase , __lowerCamelCase = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__lowerCamelCase = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__lowerCAmelCase , (dist + 1, (nx, ny)) )
__lowerCamelCase = dist + 1
__lowerCamelCase = (x, y)
return np.inf, []
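# Usage sketch (hedged: only cells equal to 1 are traversable per the check above,
# and each step costs 1): on a 3x3 grid of ones, (0, 0) -> (2, 2) without
# diagonals should cost 4 steps along a 5-node path, e.g.
#   dist, path = <this function>(np.ones((3, 3)), (0, 0), (2, 2), allow_diagonal=False)
#   -> dist == 4.0 and len(path) == 5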
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270
|
from __future__ import annotations
from statistics import mean
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int ) -> list[int]:
__lowerCamelCase = [0] * no_of_processes
__lowerCamelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__lowerCAmelCase ):
__lowerCamelCase = burst_time[i]
__lowerCamelCase = []
__lowerCamelCase = 0
__lowerCamelCase = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
__lowerCamelCase = []
__lowerCamelCase = -1
for i in range(__lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
__lowerCamelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCamelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCamelCase = 0
__lowerCamelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : list[int] ) -> list[int]:
__lowerCamelCase = [0] * no_of_processes
for i in range(__lowerCAmelCase ):
__lowerCamelCase = burst_time[i] + waiting_time[i]
return turn_around_time
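# The relation computed above is simply turnaround_i = burst_i + waiting_i;
# e.g. burst times [2, 5] with waiting times [0, 2] give turnarounds [2, 7].
assert [b + w for b, w in zip([2, 5], [0, 2])] == [2, 7]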
if __name__ == "__main__":
print("[TEST CASE 01]")
SCREAMING_SNAKE_CASE__ : Tuple = 4
SCREAMING_SNAKE_CASE__ : Optional[int] = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE__ : List[str] = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE__ : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
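    # Editor's note: an extra illustrative run added by the editor (not in the original):
    # with staggered arrivals, the shortest-job choice differs from arrival (FCFS) order.
    print("\n[TEST CASE 02]")
    no_of_processes = 3
    burst_time = [8, 4, 1]
    arrival_time = [0, 1, 2]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)
    print(f"Average waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")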
| 270
| 1
|
def sylvester(number: int) -> int:
    """
    Calculate the number at position `number` in Sylvester's sequence.
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 196
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[int] =logging.get_logger(__name__)
lowerCamelCase : Optional[Any] ={
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a `DecisionTransformerModel`.
    """

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
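# Editor's note: a minimal usage sketch added by the editor (not part of the original
# file); keyword names come from the __init__ signature restored above.
if __name__ == "__main__":
    config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
    print(config.model_type, config.n_layer, config.n_head)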
| 196
| 1
|
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        """Fundamental transformation/operation that'll be performed on every pixel."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
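    # Editor's note: a self-contained check added by the editor (no sample image needed):
    # on a uniform grey image, raising brightness by `level` shifts every pixel by that amount.
    grey = Image.new("L", (4, 4), color=100)
    shifted = change_brightness(grey, 50)
    assert list(shifted.getdata()) == [150] * 16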
| 342
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def __magic_name__ ( self ) -> List[str]:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def __magic_name__ ( self ) -> List[Any]:
pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 342
| 1
|
'''simple docstring'''
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """
    Extend the key so that it matches the length of the message.
    """
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """
    Encrypt the message using the extended key.
    """
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """
    Decrypt the cipher text back to the original message.
    """
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
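    # Editor's note: a round-trip property check added by the editor (not in the original):
    # decrypting an encryption under the same generated key must return the message.
    msg = "HELLO WORLD"
    new_key = generate_key(msg, "KEY")
    assert original_text(cipher_text(msg, new_key), new_key) == msg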
| 25
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    """
    Configuration class to store the configuration of a MobileNetV2 model.
    """

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
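# Editor's note: a short usage sketch added by the editor (not part of the original file),
# exercising the depth_multiplier validation restored above.
if __name__ == "__main__":
    config = MobileNetV2Config(depth_multiplier=1.4)
    print(config.model_type, config.hidden_act)
    try:
        MobileNetV2Config(depth_multiplier=0.0)
    except ValueError as err:
        print(f"rejected as expected: {err}")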
| 25
| 1
|
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': list of possible texts for the answer, as a list of strings\n          depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the CUAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\n    \'aupr\': Area Under the Precision-Recall curve\n    \'prec_at_80_recall\': Precision at 80% recall\n    \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n    >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> cuad_metric = datasets.load_metric("cuad")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 130
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
a_ = 99
def lowercase ( self : Optional[int] ) -> Any:
__lowerCAmelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowerCAmelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCAmelCase = input_ids.shape[0]
__lowerCAmelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : str ) -> List[str]:
__lowerCAmelCase = TFOPTModel.from_pretrained('facebook/opt-350m' )
__lowerCAmelCase = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase = tf.not_equal(lowerCAmelCase_ , model.config.pad_token_id )
with tf.GradientTape():
__lowerCAmelCase = model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).last_hidden_state
__lowerCAmelCase = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-3 ) )
__lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
__lowerCAmelCase = xla_generate(lowerCAmelCase_ , lowerCAmelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-2 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : int ) -> Dict:
super().setUp()
__lowerCAmelCase = 'facebook/opt-350m'
def lowercase ( self : Dict ) -> Any:
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowerCAmelCase = GPTaTokenizer.from_pretrained(self.path_model )
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowerCAmelCase = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
__lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
__lowerCAmelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase ( self : Optional[int] ) -> int:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowercase ( self : int ) -> str:
__lowerCAmelCase = 'facebook/opt-125m'
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
__lowerCAmelCase = []
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=1_0 )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Optional[Any] ) -> str:
__lowerCAmelCase = 'facebook/opt-350m'
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = 'left'
# use different length sentences to test batching
__lowerCAmelCase = [
'Hello, my dog is a little',
'Today, I',
]
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ )
__lowerCAmelCase = inputs['input_ids']
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , attention_mask=inputs['attention_mask'] )
__lowerCAmelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ )
__lowerCAmelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
__lowerCAmelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , max_length=model.config.max_length - num_paddings )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [non_padded_sentence, padded_sentence] )
def lowercase ( self : List[Any] ) -> List[Any]:
__lowerCAmelCase = 'facebook/opt-350m'
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
__lowerCAmelCase = []
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=1_0 )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 284
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
@classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """
    Denoising diffusion probabilistic models (DDPM) scheduler, Flax version.
    """

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )
    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample
    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key=None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)
def __len__(self ) -> int:
'''simple docstring'''
return self.config.num_train_timesteps
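# Editor's note: a minimal usage sketch added by the editor (not part of the original
# file), showing the functional state-passing API restored above.
if __name__ == "__main__":
    scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=10)
    # A denoising loop would now call scheduler.step(state, model_output, t, sample)
    # for each t in state.timesteps.
    print(state.timesteps)  # [900 800 700 ... 0]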
| 279
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowerCAmelCase_ = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Print a 2D tensor (name kept as `print_ad_tensor` to match the call sites below)."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and importance scores (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads based on importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune the heads selected by the mask (actually remove their weights) and compare score/speed before and after,
    as described in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances." )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory" )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers" )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1", )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy." )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).", )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step." )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking." )
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ), )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size." )
    parser.add_argument("--seed", type=int, default=42 )
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus" )
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available" )
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging." )
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging." )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
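
# Hypothetical invocation (the script name and flag values below are illustrative
# placeholders; the data file is a plain text dump of token ids for np.loadtxt):
#
#     python run_prune_gpt.py \
#         --data_dir ./tokens.txt \
#         --model_name_or_path gpt2 \
#         --output_dir ./pruned \
#         --try_masking --masking_threshold 0.9 --masking_amount 0.1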
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
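
# Hypothetical wiring of this pipeline (checkpoint ids and subfolders below are
# illustrative placeholders, not taken from this file):
#
#     from diffusers import UNet2DModel, DDIMScheduler
#     unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256", subfolder="unet")
#     scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256", subfolder="scheduler")
#     pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
#     images, noise_timestep = pipe(image=pil_image, strength=0.5, return_dict=False)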
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__A : Tuple = False, False, False
@dataclass
class Audio:
    """Audio feature to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don't have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
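
# Usage sketch (the file path is a hypothetical placeholder; this mirrors how
# `datasets` features are typically exercised, not code from this module):
#
#     feature = Audio(sampling_rate=16_000)
#     encoded = feature.encode_example("path/to/clip.wav")
#     decoded = feature.decode_example(encoded)
#     # decoded == {"path": ..., "array": np.ndarray, "sampling_rate": 16000}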
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_xlm_roberta''': [
        '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XLMRobertaConfig''',
        '''XLMRobertaOnnxConfig''',
    ],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta'''] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta_fast'''] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm_roberta'''] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlm_roberta'''] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xlm_roberta'''] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
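
# Note: with this lazy-module pattern nothing heavy is imported at package load
# time; the first attribute access through the `_LazyModule` proxy triggers the
# real import, e.g.:
#
#     from transformers.models.xlm_roberta import XLMRobertaConfig  # loads only the config module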
| 323
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP_AND_MORE = None  # placeholder removed below
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 5_12,
'''bert-large-uncased''': 5_12,
'''bert-base-cased''': 5_12,
'''bert-large-cased''': 5_12,
'''bert-base-multilingual-uncased''': 5_12,
'''bert-base-multilingual-cased''': 5_12,
'''bert-base-chinese''': 5_12,
'''bert-base-german-cased''': 5_12,
'''bert-large-uncased-whole-word-masking''': 5_12,
'''bert-large-cased-whole-word-masking''': 5_12,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-base-cased-finetuned-mrpc''': 5_12,
'''bert-base-german-dbmdz-cased''': 5_12,
'''bert-base-german-dbmdz-uncased''': 5_12,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_12,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12,
'''wietsedv/bert-base-dutch-cased''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Create a token-type mask from the two sequences passed, for sequence-pair classification tasks."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
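
# Quick usage sketch (standard transformers API, shown for orientation):
#
#     tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     enc = tok("hello world", "second segment")
#     # enc["token_type_ids"] marks segment A tokens with 0 and segment B with 1,
#     # exactly as create_token_type_ids_from_sequences above computes.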
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift `number` left by `shift_amount` bits (append zeros)."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits, replicating the sign bit."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
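
# Worked examples (values follow directly from the functions above):
#
#     logical_left_shift(17, 2)       -> '0b1000100'  (17 = 0b10001, append two zeros)
#     logical_right_shift(1983, 4)    -> '0b1111011'  (drop the low four bits; 1983 >> 4 == 123)
#     arithmetic_right_shift(-17, 2)  -> '0b111011'   (sign bit replicated; -17 >> 2 == -5)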
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem), composed of a single aggressive convolution followed by max pooling."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """ResNet shortcut, used to project residual features to the correct size and, if needed, downsample with stride 2."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer composed of two 3x3 convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """A classic ResNet bottleneck layer: a 1x1 convolution reduces the channels by `reduction`, a 3x3 convolution
    processes them, and a final 1x1 convolution remaps them to `out_channels`."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """An abstract class to handle weights initialization and a simple interface for downloading and loading
    pretrained models."""

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BackboneOutput:
        r"""
        Returns:
            `BackboneOutput` with one feature map per requested stage.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
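
# Inference sketch (standard transformers usage for this model family):
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=pil_image, return_tensors="pt")
#     logits = model(**inputs).logits
#     predicted = model.config.id2label[logits.argmax(-1).item()]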
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch iteration `steps` times to the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each segment by four segments forming the Koch "bump"."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
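
# Growth check: each iteration replaces every segment with four, so after n steps
# the curve has 3 * 4**n segments; the 5-step snowflake plotted above therefore
# contains 3 * 4**5 = 3072 segments (3073 points, counting the repeated start vertex).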
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_resnet'] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_resnet'] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_resnet'] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) time using the 6k +/- 1 optimization."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class TestIsPrime(unittest.TestCase):
    def test_primes(self) -> None:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
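
# Why stepping by 6 works: every integer is of the form 6k, 6k±1, 6k+2, 6k+3, or
# 6k+4, and every form except 6k±1 is divisible by 2 or 3. After ruling those
# out, trial division by 5, 7, 11, 13, ... (i and i + 2 for i = 5, 11, ...)
# up to sqrt(n) suffices.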
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r"\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n            or scores for each vocabulary token after SoftMax.\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional stopping criteria specific kwargs.\n\n    Return:\n        `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError('StoppingCriteria needs to be subclassed')
class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
                f'maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '
                'exceptions, performance degradation, or nothing at all.')
        return is_done
class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            'The class `MaxNewTokensCriteria` is deprecated. '
            f'Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '
            'with `max_length = start_length + max_new_tokens` instead.',
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter', UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
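# Illustrative usage of the criteria above (a minimal sketch; the tensor
# contents are arbitrary dummies, only the sequence length matters):
def _demo_stopping_criteria() -> None:
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=5)])
    input_ids = torch.ones((1, 5), dtype=torch.long)
    scores = torch.zeros((1, 10))
    assert criteria(input_ids, scores)  # sequence already has 5 tokens -> stop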
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_lowerCAmelCase : Any = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from copy import deepcopy
class BinaryIndexedTree:
    """Fenwick tree (binary indexed tree) supporting point updates and prefix sums."""

    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError('Either arr or size must be specified')

    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        # add the lowest set bit: jump to the next node covering this index
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        # drop the lowest set bit: parent in the prefix-sum walk
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
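# Example usage of the tree above: point updates and half-open range sums.
def _demo_binary_indexed_tree() -> None:
    bit = BinaryIndexedTree(arr=[1, 2, 3, 4, 5])
    assert bit.prefix(5) == 15      # sum of arr[0:5]
    bit.add(2, 10)                  # array is now [1, 2, 13, 4, 5]
    assert bit.query(2, 4) == 17    # sum of arr[2:4] == 13 + 4
    assert bit.get(2) == 13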
def _print_dist(dist, v) -> None:
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('inf'):
                print(int(dist[i][j]), end='\t')
            else:
                print('INF', end='\t')
        print()


def floyd_warshall(graph, v):
    dist = [[float('inf') for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('inf')
                    and dist[k][j] != float('inf')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))

    graph = [[float('inf') for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
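# Non-interactive sketch mirroring the commented session above:
def _demo_floyd_warshall() -> None:
    inf = float('inf')
    demo_graph = [[0.0, inf, inf], [inf, 0.0, 2.0], [inf, 1.0, 0.0]]
    dist, _ = _print_ignored = floyd_warshall(demo_graph, 3)[0], None
    assert dist[1][2] == 2.0 and dist[2][1] == 1.0  # matches the expected output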
'''simple docstring'''
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"""{solution() = }""")
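# Sanity check of the parametrisation used above: coprime (m, n) of opposite
# parity give a primitive Pythagorean triple with perimeter 2 * m * (m + n).
def _check_euclid_formula() -> None:
    m, n = 2, 1
    a, b, c = m * m - n * n, 2 * m * n, m * m + n * n  # the (3, 4, 5) triple
    assert a * a + b * b == c * c
    assert a + b + c == 2 * m * (m + n)  # perimeter 12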
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += ' '
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
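# Quick usage check of the function above:
def _demo_fizz_buzz() -> None:
    expected = ['1', '2', 'Fizz', '4', 'Buzz', 'Fizz', '7', '8', 'Fizz', 'Buzz',
                '11', 'Fizz', '13', '14', 'FizzBuzz']
    assert fizz_buzz(1, 15).split() == expected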
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
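# Hedged usage sketch of the reader above: each line of the file becomes one
# example with a single "text" column (the file layout here is illustrative):
def _demo_text_reader() -> None:
    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, 'corpus.txt')
        with open(path, 'w', encoding='utf-8') as f:
            f.write('hello\nworld\n')
        dataset = TextDatasetReader(path).read()
        assert dataset.num_rows == 2 and dataset[0]['text'] == 'hello'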
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
    tokenizer = FSMTTokenizer(
langs=['en', 'ru'],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'abeja/gpt-neox-japanese-2.7b': 20_48,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = "".join(tokens).strip()
        return out_string
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length:]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """Subword tokenizer handling Japanese-specific normalisation and emoji."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace(" ", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
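# Hedged usage sketch (requires downloading the checkpoint named in the maps
# above, so it is left commented out):
# tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained('abeja/gpt-neox-japanese-2.7b')
# ids = tokenizer('こんにちは、世界')['input_ids']
# print(tokenizer.decode(ids))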
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile(script) -> dict:
    """May raise json.decoder.JSONDecodeError if the script tag does not hold user data."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, 'html.parser').find_all('script')
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self : Union[str, Any] ) -> str:
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self : Optional[int] ) -> str:
return F"""{self.fullname} ({self.username}) is {self.biography}"""
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get('CI'):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Union[str, Any] = InstagramUser('github')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
__version__ = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Return the writer batch size to pass to a ParquetWriter, capped for heavy feature types."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['parquet'][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, 'wb+') as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the pyarrow table as Parquet to a binary file handle.

        Caller is responsible for opening and closing the handle.
        """
        written = 0
        _ = parquet_writer_kwargs.pop('path_or_buf', None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating parquet from Arrow format',
        ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
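# Hedged round-trip sketch of the writer/reader pair above:
def _demo_parquet_roundtrip() -> None:
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, 'data.parquet')
        ds = Dataset.from_dict({'x': [1, 2, 3]})
        ParquetDatasetWriter(ds, path).write()
        assert ParquetDatasetReader(path).read().num_rows == 3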
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = R'\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n            or scores for each vocabulary token after SoftMax.\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional stopping criteria specific kwargs.\n\n    Return:\n        `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError('StoppingCriteria needs to be subclassed')
class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                'exceptions, performance degradation, or nothing at all.')
        return is_done
class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            'The class `MaxNewTokensCriteria` is deprecated. '
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            'with `max_length = start_length + max_new_tokens` instead.',
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter', UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'openbmb/cpm-ant-10b': 10_24,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ['jieba'])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Tokenize a string."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder['</_>'] = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder['</n>'] = self.encoder['\n']
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))

        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        ' Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """Build model inputs by prepending the BOS token to each sequence."""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
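# Small illustration of the greedy longest-match segmentation implemented by
# WordpieceTokenizer above (toy vocabulary; no jieba needed for this part):
def _demo_wordpiece() -> None:
    toy = WordpieceTokenizer(vocab={'模型': 0, '模': 1, '型': 2}, unk_token='<unk>')
    assert toy.tokenize('模型') == ['模型']          # longest match wins
    assert toy.tokenize('模x') == ['模', '<unk>']   # unknown char falls back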
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith('deeplabv3_'):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = 'pascal-voc-id2label.json'
    else:
        config.num_labels = 1000
        filename = 'imagenet-1k-id2label.json'

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f'layer_{i}.' in name:
            name = name.replace(f'layer_{i}.', f'encoder.layer.{i - 1}.')

    if "conv_1." in name:
        name = name.replace('conv_1.', 'conv_stem.')
    if ".block." in name:
        name = name.replace('.block.', '.')
    if "exp_1x1" in name:
        name = name.replace('exp_1x1', 'expand_1x1')
    if "red_1x1" in name:
        name = name.replace('red_1x1', 'reduce_1x1')
    if ".local_rep.conv_3x3." in name:
        name = name.replace('.local_rep.conv_3x3.', '.conv_kxk.')
    if ".local_rep.conv_1x1." in name:
        name = name.replace('.local_rep.conv_1x1.', '.conv_1x1.')
    if ".norm." in name:
        name = name.replace('.norm.', '.normalization.')
    if ".conv." in name:
        name = name.replace('.conv.', '.convolution.')
    if ".conv_proj." in name:
        name = name.replace('.conv_proj.', '.conv_projection.')

    for i in range(0, 2):
        for j in range(0, 4):
            if f'.{i}.{j}.' in name:
                name = name.replace(f'.{i}.{j}.', f'.{i}.layer.{j}.')

    for i in range(2, 6):
        for j in range(0, 4):
            if f'.{i}.{j}.' in name:
                name = name.replace(f'.{i}.{j}.', f'.{i}.')
                if "expand_1x1" in name:
                    name = name.replace('expand_1x1', 'downsampling_layer.expand_1x1')
                if "conv_3x3" in name:
                    name = name.replace('conv_3x3', 'downsampling_layer.conv_3x3')
                if "reduce_1x1" in name:
                    name = name.replace('reduce_1x1', 'downsampling_layer.reduce_1x1')

    for i in range(2, 5):
        if f'.global_rep.{i}.weight' in name:
            name = name.replace(f'.global_rep.{i}.weight', '.layernorm.weight')
        if f'.global_rep.{i}.bias' in name:
            name = name.replace(f'.global_rep.{i}.bias', '.layernorm.bias')
    if ".global_rep." in name:
        name = name.replace('.global_rep.', '.transformer.')
    if ".pre_norm_mha.0." in name:
        name = name.replace('.pre_norm_mha.0.', '.layernorm_before.')
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('.pre_norm_mha.1.out_proj.', '.attention.output.dense.')
    if ".pre_norm_ffn.0." in name:
        name = name.replace('.pre_norm_ffn.0.', '.layernorm_after.')
    if ".pre_norm_ffn.1." in name:
        name = name.replace('.pre_norm_ffn.1.', '.intermediate.dense.')
    if ".pre_norm_ffn.4." in name:
        name = name.replace('.pre_norm_ffn.4.', '.output.dense.')
    if ".transformer." in name:
        name = name.replace('.transformer.', '.transformer.layer.')

    if ".aspp_layer." in name:
        name = name.replace('.aspp_layer.', '.')
    if ".aspp_pool." in name:
        name = name.replace('.aspp_pool.', '.')
    if "seg_head." in name:
        name = name.replace('seg_head.', 'segmentation_head.')
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('segmentation_head.classifier.classifier.', 'segmentation_head.classifier.')

    if "classifier.fc." in name:
        name = name.replace('classifier.fc.', 'classifier.')
    elif (not base_model) and ("segmentation_head." not in name):
        name = 'mobilevit.' + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevit.'

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f'{model_prefix}encoder.layer.{layer_num}')
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
            )
            if "weight" in key:
                orig_state_dict[prefix + 'query.weight'] = val[:dim, :]
                orig_state_dict[prefix + 'key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[prefix + 'value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[prefix + 'query.bias'] = val[:dim]
                orig_state_dict[prefix + 'key.bias'] = val[dim : dim * 2]
                orig_state_dict[prefix + 'value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
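# The split above carves a fused (3 * dim, hidden) projection into equal
# query/key/value thirds; a standalone illustration with dummy tensors:
def _demo_qkv_split() -> None:
    dim, hidden = 4, 8
    fused = torch.arange(3 * dim * hidden, dtype=torch.float32).reshape(3 * dim, hidden)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, hidden)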
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location='cpu')

    # load 🤗 model
    if mobilevit_name.startswith('deeplabv3_'):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits = outputs.logits
if mobilevit_name.startswith('deeplabv3_' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
else:
assert logits.shape == (1, 10_00)
if mobilevit_name == "mobilevit_s":
SCREAMING_SNAKE_CASE = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
SCREAMING_SNAKE_CASE = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
SCREAMING_SNAKE_CASE = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
SCREAMING_SNAKE_CASE = {
'mobilevit_s': 'mobilevit-small',
'mobilevit_xs': 'mobilevit-x-small',
'mobilevit_xxs': 'mobilevit-xx-small',
'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
}
print('Pushing to the hub...' )
SCREAMING_SNAKE_CASE = model_mapping[mobilevit_name]
image_processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='apple' )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='apple' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_mobilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
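# Example invocation (illustrative; the script name and checkpoint path below
# are assumptions, not taken from this file):
#
#   python convert_mobilevit_original_pytorch_checkpoint_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit_s_hf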
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
SCREAMING_SNAKE_CASE_ : ClassVar[Features] = Features({"""audio""": Audio()} )
SCREAMING_SNAKE_CASE_ : ClassVar[Features] = Features({"""labels""": ClassLabel} )
SCREAMING_SNAKE_CASE_ : str = "audio"
SCREAMING_SNAKE_CASE_ : str = "labels"
def __A ( self , lowerCAmelCase__ ) -> Any:
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , lowerCAmelCase__ ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
SCREAMING_SNAKE_CASE = copy.deepcopy(self )
SCREAMING_SNAKE_CASE = self.label_schema.copy()
SCREAMING_SNAKE_CASE = features[self.label_column]
SCREAMING_SNAKE_CASE = label_schema
return task_template
@property
def __A ( self ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
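# Example invocation (illustrative; the script name and paths are assumptions):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5_pytorch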
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
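# Illustrative example of an import this shim keeps working (deprecated path;
# the module location is an assumption based on the relative imports above):
#
#   from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import (
#       StableDiffusionControlNetPipeline,  # emits the deprecation warning above
#   )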
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
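# Minimal usage sketch (illustrative; relies only on the classes defined above):
#
#   text_config = BridgeTowerTextConfig()
#   vision_config = BridgeTowerVisionConfig()
#   config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.text_config.vocab_size == 50265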
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish, split into multiple parameterized sub-tests
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
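        # Worked check of the arithmetic above (illustrative): 25e6 params
        # * (8 - 2) bytes saved per param = 150e6 bytes, i.e. int(150e6 / 2**20)
        # ≈ 143 MiB, comfortably above the 120 MB threshold used below.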
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the
    command line.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
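# Usage sketch (hypothetical local paths; the MRPC data must already be
# downloaded to data_dir):
#
#   from transformers import AutoTokenizer
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")
#   print(len(train_dataset), train_dataset[0])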
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DetaImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
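    # Worked example for the unbatched branch above (illustrative): with
    # size={"shortest_edge": 18}, a 60x30 (w x h) image maps to
    # expected_height=18, expected_width=36 - the short side is pinned to 18
    # and the aspect ratio is preserved.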
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
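    # Hedged usage sketch for the annotation path exercised above (fixture
    # paths as in these tests):
    #
    #   processor = DetaImageProcessor()
    #   encoding = processor(images=image, annotations=target, return_tensors="pt")
    #   encoding["labels"][0]["boxes"]  # normalized (center_x, center_y, width, height)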
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
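    # e.g. with pool_scales=(1, 2, 3, 6), a (batch, in_channels, 32, 32) input
    # yields four tensors of shape (batch, channels, 32, 32), each obtained by
    # pooling to a 1x1, 2x2, 3x3 or 6x6 grid, projecting with a 1x1 conv and
    # upsampling back to the input resolution.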
class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing head, following https://arxiv.org/abs/1807.10221.
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    """
    Fully convolutional auxiliary head, following https://arxiv.org/abs/1411.4038.
    """

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
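# Minimal inference sketch (illustrative; uses the checkpoint listed in
# UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST above, and a hypothetical local image):
#
#   from PIL import Image
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=Image.open("scene.jpg"), return_tensors="pt")
#   logits = model(**inputs).logits  # (batch, num_labels, height, width)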
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        code = black.format_str(code, mode=black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119))
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 276
| 0
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None

    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
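# Worked example (hypothetical data, not in the original script): a backend whose two
# halves disagree produces one "Differences for ..." header plus one line per stray object:
#
#     analyze_results({"none": ["A"], "torch": ["B"]}, {"none": ["A"], "torch": ["B", "C"]})
#     -> ["Differences for torch backend:", "  C in TYPE_HINT but not in _import_structure."]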
def check_all_inits():
    """
    Check all inits in the repo and raise an error if at least one does not define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
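# Both checks assume the script is run from the repository root so that
# PATH_TO_TRANSFORMERS ("src/transformers") resolves; the usual entry point is
# `python utils/check_inits.py` (the exact file location is an assumption here).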
| 60
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
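# Sketch of what the _LazyModule above buys us (the import path below is illustrative):
# importing the package is cheap, and the torch-backed module is only imported when an
# attribute is first accessed.
#
#     from transformers.models import autoformer   # fast, modeling code not loaded yet
#     model_cls = autoformer.AutoformerModel        # triggers import of modeling_autoformer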
| 194
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
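# Usage sketch (not part of the original file): the defaults mirror the
# `sayakpaul/vit-msn-base` architecture, and any keyword overrides a single field.
#
#     config = ViTMSNConfig(image_size=384, patch_size=32)
#     assert config.hidden_size == 768  # untouched default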
| 363
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Calculate the Möbius function of a positive integer `number`."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
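# Examples (illustrative, assuming the helpers above behave as their names suggest):
#     mobius(5)  -> -1   (one prime factor)
#     mobius(6)  ->  1   (two distinct prime factors: 2 * 3)
#     mobius(12) ->  0   (divisible by 4, so not square-free)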
| 310
| 0
|
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
'''A''': '''N''',
'''N''': '''A''',
'''B''': '''O''',
'''O''': '''B''',
'''C''': '''P''',
'''P''': '''C''',
'''D''': '''Q''',
'''Q''': '''D''',
'''E''': '''R''',
'''R''': '''E''',
'''F''': '''S''',
'''S''': '''F''',
'''G''': '''T''',
'''T''': '''G''',
'''H''': '''U''',
'''U''': '''H''',
'''I''': '''V''',
'''V''': '''I''',
'''J''': '''W''',
'''W''': '''J''',
'''K''': '''X''',
'''X''': '''K''',
'''L''': '''Y''',
'''Y''': '''L''',
'''M''': '''Z''',
'''Z''': '''M''',
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Checks if the values can be used for the enigma function."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"First rotor position is not within range of 1..26 ({rotorpos1})")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"Second rotor position is not within range of 1..26 ({rotorpos2})")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"Third rotor position is not within range of 1..26 ({rotorpos3})")

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Creates the dictionary for the plugboard."""
    if not isinstance(pbstring, str):
        raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
    elif len(pbstring) % 2 != 0:
        raise Exception(f"Odd number of symbols ({len(pbstring)})")
    elif pbstring == "":
        return {}

    pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            raise Exception(f"'{i}' not in list of symbols")
        elif i in tmppbl:
            raise Exception(f"Duplicate symbol ({i})")
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Emulates the Enigma machine: the same call both encrypts and decrypts."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

            # else:
            #     pass
            #     Error could be also raised
            #     raise ValueError(
            #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)

    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
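# Note on the reflector (illustrative): because the reflector is a fixed involution
# (A<->N, B<->O, ...), running `enigma` a second time with the same rotor positions,
# rotor selection and plugboard undoes the first pass:
#
#     assert enigma(enigma("HELLO", (1, 1, 1)), (1, 1, 1)) == "HELLO"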
| 159
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)

    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 38
| 0
|
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
SCREAMING_SNAKE_CASE_ = weights[f"layers_{lyr_num}"]
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE_ = ly_weight["""attention"""]
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights, model):
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
SCREAMING_SNAKE_CASE_ = weights[f"layers_{lyr_num}"]
SCREAMING_SNAKE_CASE_ = ly_weight["""attention"""]
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights, model):
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
SCREAMING_SNAKE_CASE_ = weights[f"layers_{lyr_num}"]
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = ly_weight["""self_attention"""]
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = ly_weight["""MultiHeadDotProductAttention_0"""]
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
lowerCamelCase__ : str = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
lowerCamelCase__ : List[str] = parser.parse_args()
main(args)
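# Example invocation (script name and paths are placeholders):
#
#     python convert_music_spectrogram_to_diffusers.py \
#         --checkpoint_path /path/to/base_with_context/checkpoint_500000 \
#         --output_path ./spectrogram-diffusion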
| 362
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1)
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1)
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        generate_kwargs = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        tok_decode_kwargs = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **generate_kwargs).sequences
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **tok_decode_kwargs)
        assert generated_txt[0].strip() == tgt_text
| 210
| 0
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
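# Usage sketch (illustrative): wrap terminal-drawing code so the cursor is hidden
# while it runs and restored even if an exception escapes.
#
#     with hide():
#         render_menu()  # hypothetical drawing function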
| 180
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 180
| 1
|
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
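# Behaviour sketch (comment only, my addition): thanks to the `DummyObject` metaclass,
# merely instantiating the placeholder or calling its classmethods raises an informative
# error from `requires_backends` when the real backends are absent, e.g.
#   DPMSolverSDEScheduler()  # -> error asking the user to install `torch` and `torchsde`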
| 299
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
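# Example invocation (paths are placeholders; the script name is an assumption following
# the transformers conversion-script naming convention):
#   python convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf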
| 299
| 1
|
"""simple docstring"""
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
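# Worked example (illustrative numbers, my addition): by mass action, n * p = n_i**2,
# so with n_i = 25 and n = 100 the hole concentration is 25**2 / 100 = 6.25; passing 0
# marks the unknown quantity to solve for.
assert carrier_concentration(electron_conc=100, hole_conc=0, intrinsic_conc=25) == ("hole_conc", 6.25)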
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264
|
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.

            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
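# Example invocation (paths are placeholders):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir ./llama-7b-hf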
| 276
| 0
|
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation of a positive number left, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation of a positive number right, dropping low bits."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift a (possibly negative) number right, replicating the two's-complement sign bit."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
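# Quick sanity checks (my addition): 10 is 0b1010, so a logical left shift by 2 appends
# two zeros and a logical right shift by 2 drops the two lowest bits; -10 in five-bit
# two's complement is 10110, and the arithmetic shift replicates its sign bit.
assert logical_left_shift(10, 2) == "0b101000"
assert logical_right_shift(10, 2) == "0b10"
assert arithmetic_right_shift(-10, 2) == "0b11101"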
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
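# Usage sketch (shapes below are assumptions worked out from the padding rule above):
# each spatial side is padded up to the next multiple of `pad_size` with symmetric padding.
#   import numpy as np
#   processor = Swin2SRImageProcessor(pad_size=8)
#   image = np.zeros((21, 30, 3), dtype=np.uint8)            # height 21, width 30
#   batch = processor.preprocess(image, return_tensors="np")
#   batch["pixel_values"].shape                              # (1, 3, 24, 32)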
| 53
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
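# Usage sketch (the factor value is an illustrative assumption): the validation above
# accepts a two-field dict with "type" in {"linear", "dynamic"} and a float "factor" > 1.
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})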
| 278
|
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, n * (2n - 1) for n = 0 .. length - 1."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
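# Sanity check (my addition): n * (2n - 1) for n = 0..4, including the degenerate 0th term.
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]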
if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 310
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
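# Usage sketch (checkpoint name taken from the maps above; exact ids depend on the
# released vocab, so none are asserted here):
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   encoding = tokenizer("Hello world")   # wraps the ids in [CLS] ... [SEP]; token_type_ids are all 0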
| 357
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 185
| 0
|