"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : Dict = inspect.getfile(accelerate.test_utils )
_lowerCamelCase : Optional[Any] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
_lowerCamelCase : str = test_metrics
@require_cpu
def A_ ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def A_ ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def A_ ( self ):
self.test_metrics.main()
@require_multi_gpu
def A_ ( self ):
print(F'''Found {torch.cuda.device_count()} devices.''' )
_lowerCamelCase : Optional[int] = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase , env=os.environ.copy() )
"""simple docstring"""
from __future__ import annotations
def _snake_case ( lowercase__ ):
_lowerCamelCase : int = len(lowercase__ )
# We need to create solution object to save path.
_lowerCamelCase : Tuple = [[0 for _ in range(lowercase__ )] for _ in range(lowercase__ )]
_lowerCamelCase : Optional[Any] = run_maze(lowercase__ , 0 , 0 , lowercase__ )
if solved:
print('\n'.join(str(lowercase__ ) for row in solutions ) )
else:
print('No solution exists!' )
return solved
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : Optional[int] = len(lowercase__ )
# Final check point.
if i == j == (size - 1):
_lowerCamelCase : Optional[Any] = 1
return True
_lowerCamelCase : List[str] = (not i < 0) and (not j < 0) # Check lower bounds
_lowerCamelCase : List[str] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
_lowerCamelCase : List[Any] = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
_lowerCamelCase : Union[str, Any] = 1
# check for directions
if (
run_maze(lowercase__ , i + 1 , lowercase__ , lowercase__ )
or run_maze(lowercase__ , lowercase__ , j + 1 , lowercase__ )
or run_maze(lowercase__ , i - 1 , lowercase__ , lowercase__ )
or run_maze(lowercase__ , lowercase__ , j - 1 , lowercase__ )
):
return True
_lowerCamelCase : Dict = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
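
    # Usage sketch (illustrative): 0 marks an open cell and 1 a blocked cell,
    # matching the `not maze[i][j]` check in run_maze above.
    demo_maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    solve_maze(demo_maze)  # prints the solution matrix and returns True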
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_lowercase = 8
def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ):
A = x.device
A = (x * 255).int().clamp(0 , 255 )
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' )
A = ((x & mask) != 0).float()
A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' )
A = bits * 2 - 1
return bits
def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ):
A = x.device
A = (x > 0).int()
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 )
A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
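

# Round-trip sanity check (illustrative): after 8-bit quantization, an image
# tensor survives decimal_to_bits -> bits_to_decimal unchanged:
#
#     x = torch.rand(1, 3, 4, 4)
#     x_q = (x * 255).int().float() / 255  # quantize to 8 bits first
#     assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), x_q)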
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step that clamps "predicted x_0" to +-bit_scale instead of +-1."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper for an in-detail understanding.
    # Notation (<variable name> -> <name in paper>):
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM step that clamps "predicted x_0" to +-bit_scale instead of +-1."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Patch the scheduler so "predicted x_0" is clamped to +-bit_scale rather
        # than [-1, 1]. The step functions read `self.bit_scale` from the scheduler
        # instance, so mirror it there and bind the function to the instance.
        scheduler.bit_scale = bit_scale
        step_fn = ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        scheduler.step = step_fn.__get__(scheduler)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
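

# Usage sketch (hypothetical, untrained modules; the UNet must accept
# (sample, timestep) and operate on 3 * BITS channels, matching the bit
# expansion in decimal_to_bits above):
#
#     unet = UNet2DConditionModel(sample_size=64, in_channels=3 * BITS, out_channels=3 * BITS)
#     pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#     image = pipe(height=64, width=64, num_inference_steps=50).images[0]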
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _snake_case ( snake_case__ : Optional[int] ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' )
download_parser.add_argument(
'--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,)
download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' )
download_parser.set_defaults(func=A_ )
def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]:
A = model
A = cache
A = force
A = trust_remote_code
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
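

# Invocation sketch (illustrative; assumes the standard `transformers-cli`
# entry point, with the flags registered above):
#
#     transformers-cli download --cache-dir /tmp/models bert-base-uncased
#
# run() then pre-downloads both the model weights and the tokenizer files.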
import itertools
import random
import unittest

import numpy as np

from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch


global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 list of lists of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
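

# Standalone usage sketch of the extractor under test (shapes follow the
# integration tests above: a waveform input -> (batch, num_samples); an
# `audio_target` input -> (batch, num_frames, num_mel_bins)):
#
#     extractor = SpeechT5FeatureExtractor()
#     wav = [0.0] * 16000  # one second of silence at 16 kHz
#     inputs = extractor(wav, sampling_rate=16000, return_tensors="pt")
#     mels = extractor(audio_target=wav, sampling_rate=16000, return_tensors="pt")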
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on

class NllbTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" NLLB tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
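

# Usage sketch (model id taken from the pretrained maps above):
#
#     tokenizer = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tokenizer("Hello world", return_tensors="pt")
#     # In the default (non-legacy) mode the source text is encoded as
#     # [src_lang_code] ... [eos], per set_src_lang_special_tokens above.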
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic


def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
    rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
    rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
    rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            # shortcut
            if layer_idx == 0:
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
                    )
                )
            # 3 convs
            for i in range(3):
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
                    )
                )
    # fmt: on

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (
                f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
                f"encoder.layers.{i}.self_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
        )
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")
        )
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
                f"decoder.layers.{i}.self_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
        )
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
                f"decoder.layers.{i}.encoder_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
                f"decoder.layers.{i}.encoder_attn.out_proj.bias",
            )
        )
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")
        )
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ]
    )

    return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    config, is_panoptic = get_detr_config(model_name)
    # load original model from torch hub
    model_name_to_original_name = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(f'Converting model {model_name}...' )
    detr = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=True ).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config ):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict , src , dest )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr' )
                and not key.startswith('class_labels_classifier' )
                and not key.startswith('bbox_predictor' )
            ):
                val = state_dict.pop(key )
                state_dict['detr.model' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict['detr.' + key] = val
            elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config ) if is_panoptic else DetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion on an image
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=format )
    encoding = processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    original_outputs = detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1E-3 )
    assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1E-3 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1E-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('Uploading PyTorch model and image processor to the hub...' )
        model.push_to_hub(f'nielsr/{model_name}' )
        processor.push_to_hub(f'nielsr/{model_name}' )
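# Example invocation (illustrative; the script file name is an assumption):
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50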
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 604
| 0
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig( datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples ( df : "pyspark.sql.DataFrame" , partition_order : List[int] , ):
    '''simple docstring'''
    import pyspark
    def generate_fn():
        df_with_partition_id = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*' ).where(F'''part_id = {partition_id}''' ).drop('part_id' )
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield F'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1
    return generate_fn
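# Illustrative behaviour of the generator above: with partition_order = [2, 0]
# it yields keys "2_0", "2_1", ... for partition 2 first, then "0_0", "0_1", ...
# Keys are unique per (partition, row), so shuffling only permutes whole partitions.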
class SparkExamplesIterable( _BaseExamplesIterable ):
    def __init__( self , df : "pyspark.sql.DataFrame" , partition_order=None , ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[str] ) -> Union[str, Any]:
yield from self.generate_examples_fn()
    def shuffle_data_sources( self , generator : np.random.Generator ) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    def shard_data_sources( self , worker_id : int , num_workers : int ) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    @property
    def n_shards( self ) -> int:
        return len(self.partition_order )
class Spark( datasets.DatasetBuilder ):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__( self , df : "pyspark.sql.DataFrame" , cache_dir : str = None , working_dir : str = None , **config_kwargs , ):
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **config_kwargs , )
    def _validate_cache_dir( self ):
# Returns the path of the created file.
        def create_cache_and_write_probe(context ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=True )
            probe_file = os.path.join(self._cache_dir , 'fs_test' + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file , 'a' )
            return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager : datasets.download.download_manager.DownloadManager ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size : int ):
        import pyspark
        def get_arrow_batch_size(it ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size , 'batch_bytes: long' )
            .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
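    # Worked example (illustrative numbers): at roughly 1 kB per row and
    # 1_000_000 rows, approx_total_size is about 1e9 bytes; with a 500 MB
    # max_shard_size this repartitions into min(1_000_000, int(1e9 / 5e8)) = 2
    # partitions.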
    def _prepare_split_single( self , fpath : str , file_format : str , max_shard_size : int , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark
        writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == 'parquet'
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it , None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
            shard_id = 0
            writer = writer_class(
                features=features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
                table = pa.Table.from_batches([batch] )
                writer.write_table(table )
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
                    shutil.move(file , dest )
        stats = (
            self.df.mapInArrow(write_arrow , 'task_id: long, num_examples: long, num_bytes: long' )
            .groupBy('task_id' )
            .agg(
                pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split( self , split_generator : "datasets.SplitGenerator" , file_format : str = "arrow" , max_shard_size : Optional[Union[str, int]] = None , num_proc : Optional[int] = None , **kwargs , ):
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = '-TTTTT-SSSSS-of-NNNNN'
        fname = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        fpath = path_join(self._output_dir , fname )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f'''Renaming {total_shards} shards.''' )
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs
            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id : int , shard_id : int , global_shard_id : int , ):
                rename(
                    fs , fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''' ).replace('NNNNN' , f'''{total_shards:05d}''' ) , )
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace(SUFFIX , '' ) , )
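    # Illustrative example of the renaming above: two tasks producing 2 and 1
    # shards turn 'name-train-TTTTT-SSSSS-of-NNNNN.arrow' into
    # 'name-train-00000-of-00003.arrow' ... 'name-train-00002-of-00003.arrow';
    # with a single shard the whole '-TTTTT-SSSSS-of-NNNNN' suffix is dropped.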
    def _get_examples_iterable_for_split( self , split_generator : "datasets.SplitGenerator" , ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df )
| 106
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset ( dataset , expected_features ):
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_sql_keep_in_memory ( keep_in_memory , sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_sql_dataset(dataset , expected_features )
@require_sqlalchemy
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_sql_features ( features , sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=features , cache_dir=cache_dir ).read()
    _check_sql_dataset(dataset , expected_features )
def iter_sql_file ( sqlite_path ):
    with contextlib.closing(sqlite3.connect(sqlite_path ) ) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset" )
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql ( sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir , "tmp.sql" )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=1 ).write()
    original_sql = iter_sql_file(sqlite_path )
    expected_sql = iter_sql_file(output_sqlite_path )
    for rowa, rowa_expected in zip(original_sql , expected_sql ):
        assert rowa == rowa_expected
@require_sqlalchemy
def test_dataset_to_sql_multiproc ( sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir , "tmp.sql" )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=2 ).write()
    original_sql = iter_sql_file(sqlite_path )
    expected_sql = iter_sql_file(output_sqlite_path )
    for rowa, rowa_expected in zip(original_sql , expected_sql ):
        assert rowa == rowa_expected
@require_sqlalchemy
def test_dataset_to_sql_invalidproc ( sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir , "tmp.sql" )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir ).read()
    with pytest.raises(ValueError ):
        SqlDatasetWriter(dataset , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=0 ).write()
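# Minimal round-trip sketch mirroring the tests above (illustrative; 'data.db'
# and the 'dataset' table are placeholder names):
#
#   from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
#   ds = SqlDatasetReader("dataset", "sqlite:///data.db").read()
#   SqlDatasetWriter(ds, "dataset", "sqlite:///copy.db", num_proc=1).write()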
| 613
| 0
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    """simple docstring"""
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(F"""Task {task} not supported.""" )
    print(F"""Building PyTorch model from configuration: {config}""" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(F"""Save tokenizer files to {pytorch_dump_path}""" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
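# Example invocation (illustrative; the script file name and paths are assumptions):
#   python convert_tapas_checkpoint.py --task WTQ --reset_position_index_per_cell \
#       --tf_checkpoint_path model.ckpt --tapas_config_file tapas_config.json \
#       --pytorch_dump_path ./tapas-wtq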
| 711
|
import re
def split_input ( str_ ) -> list:
    """simple docstring"""
    return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]
def to_simple_case ( str_ ) -> str:
    """simple docstring"""
    string_split = split_input(str_ )
    return "".join(
        ["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case ( text , upper , separator ) -> str:
    """simple docstring"""
    try:
        string_split = split_input(text )
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case ( text ) -> str:
    """simple docstring"""
    return to_simple_case(text )
def to_camel_case ( text ) -> str:
    """simple docstring"""
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case ( text , upper ) -> str:
    """simple docstring"""
    return to_complex_case(text , upper , "_" )
def to_kebab_case ( text , upper ) -> str:
    """simple docstring"""
    return to_complex_case(text , upper , "-" )
if __name__ == "__main__":
__import__("doctest").testmod()
| 437
| 0
|
'''simple docstring'''
class RadixNode:
    """simple docstring"""
    def __init__( self , prefix = "" , is_leaf = False ):
        '''simple docstring'''
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self , word ):
        '''simple docstring'''
        x = 0
        for q, w in zip(self.prefix , word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
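    # Worked example: a node with prefix "banana" matched against "band"
    # returns ("ban", "ana", "d"): the common prefix, the unmatched rest of
    # the node's prefix, and the unmatched rest of the word.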
    def insert_many( self , words ):
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert( self , word ):
        '''simple docstring'''
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self , word ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self , word ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0] , None )
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self , height = 0 ):
        '''simple docstring'''
        if self.prefix != "":
            print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
        for value in self.nodes.values():
            value.print_tree(height + 1 )
def test_trie():
    '''simple docstring'''
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find("bandanas" )
    assert not root.find("apps" )
    root.delete("all" )
    assert not root.find("all" )
    root.delete("banana" )
    assert not root.find("banana" )
    assert root.find("bananas" )
    return True
def pytests():
    '''simple docstring'''
    assert test_trie()
def main():
    '''simple docstring'''
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words )
    print("Words:" , words )
    print("Tree:" )
    root.print_tree()
if __name__ == "__main__":
    main()
| 111
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCamelCase = logging.get_logger(__name__)
def squared_euclidean_distance (a , b ):
    '''simple docstring'''
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
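# The helper above relies on the identity ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2,
# computed batch-wise: d[i, j] is the squared distance between rows a[i] and b[j].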
def color_quantize (x , clusters ):
    '''simple docstring'''
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
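# Illustrative use: for pixel values x of shape (height, width, 3) and a palette
# `clusters` of shape (n_clusters, 3), color_quantize returns height * width
# indices of the nearest palette colour for each pixel.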
class ImageGPTImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ['pixel_values']
    def __init__( self , clusters = None , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_normalize = True , do_color_quantize = True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}" )
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs )
    def normalize( self , image , data_format = None , ):
        '''simple docstring'''
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format )
        image = image - 1
        return image
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_normalize = None , do_color_quantize = None , clusters = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'input_ids': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 111
| 1
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
snake_case_ : int = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
snake_case_ : Optional[Any] = "allenai"
def rewrite_dict_keys ( d ):
    """simple docstring"""
    da = dict((re.sub(R"@@$" , "" , k ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , k ), v) for k, v in d.items() )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
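# Illustrative example (hand-derived): {"ca@@": 5, "cat": 6, "<s>": 0} becomes
# {"ca": 5, "cat</w>": 6, "<s>": 0}; BPE continuation markers "@@" are stripped,
# word-final tokens gain "</w>", and the special tokens are restored unchanged.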
def convert_fsmt_checkpoint_to_pytorch ( fsmt_checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    assert os.path.exists(fsmt_checkpoint_path )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f"Writing results to {pytorch_dump_folder_path}" )
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path )
    fsmt_folder_path = dirname(fsmt_checkpoint_path )
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}" )
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path , checkpoint_file , data_name_or_path , archive_map=models , **kwargs )
    args = vars(chkpt["args"]["model"] )
    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]
    data_root = dirname(pytorch_dump_folder_path )
    model_dir = basename(pytorch_dump_folder_path )
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path , f"dict.{src_lang}.txt" )
    tgt_dict_file = os.path.join(fsmt_folder_path , f"dict.{tgt_lang}.txt" )
    src_dict = Dictionary.load(src_dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , "vocab-src.json" )
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records" )
    with open(src_vocab_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file )
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices )
    tgt_vocab_size = len(tgt_vocab )
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path , "vocab-tgt.json" )
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records" )
    with open(tgt_vocab_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(tgt_vocab , ensure_ascii=False , indent=json_indent ) )
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["merges_file"] )
    for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path , fn )
        if os.path.exists(fsmt_merges_file ):
            break
    with open(fsmt_merges_file , encoding="utf-8" ) as fin:
        merges = fin.read()
    merges = re.sub(R" \d+$" , "" , merges , 0 , re.M ) # remove frequency number
    print(f"Generating {merges_file}" )
    with open(merges_file , "w" , encoding="utf-8" ) as fout:
        fout.write(merges )
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path , "config.json" )
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"
    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0
    print(f"Generating {fsmt_model_config_file}" )
    with open(fsmt_model_config_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }
    print(f"Generating {fsmt_tokenizer_config_file}" )
    with open(fsmt_tokenizer_config_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = FSMTForConditionalGeneration(config )
    # check that it loads ok
    model_new.load_state_dict(model_state_dict , strict=False )
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(f"Generating {pytorch_weights_dump_path}" )
    torch.save(model_state_dict , pytorch_weights_dump_path )
    print("Conversion is done!" )
    print("\nLast step is to upload the files to s3" )
    print(f"cd {data_root}" )
    print(f"transformers-cli upload {model_dir}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
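# Example invocation (illustrative; the script file name and paths are assumptions;
# the dump dir must contain the checkpoint together with its dict.*.txt and bpecodes files):
#   python convert_fsmt_checkpoint.py \
#       --fsmt_checkpoint_path ./wmt19-en-de/model4.pt \
#       --pytorch_dump_folder_path ./converted/wmt19-en-de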
| 253
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
snake_case_ : str = logging.get_logger(__name__)
class PerceiverFeatureExtractor( PerceiverImageProcessor ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 253
| 1
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
        encoded_inputs['image'] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )
        return images_with_overflow
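    # Illustrative: with overflow_to_sample_mapping = [0, 0, 1], the image of
    # sample 0 is duplicated for its two overflowing windows and sample 1's
    # image is used once, keeping one image per returned `input_ids` row.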
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
| 106
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken('<mask>' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token} )
        tokenizer.add_tokens(['<ctc_blank>'] )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str=False , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : Optional[int]=5 ):
'''simple docstring'''
_snake_case , _snake_case : str = self.get_input_output_texts(UpperCamelCase )
_snake_case : str = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
_snake_case : List[str] = tokenizer.decode(UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase )
return text, ids
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : str = '<pad>'
_snake_case : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(UpperCamelCase ) , 81 )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : int = self.get_tokenizers(do_lower_case=UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
_snake_case : Any = tokenizer.vocab_size
_snake_case : Any = len(UpperCamelCase )
self.assertNotEqual(UpperCamelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_snake_case : int = ['aaaaa bbbbbb', 'cccccccccdddddddd']
_snake_case : List[Any] = tokenizer.add_tokens(UpperCamelCase )
_snake_case : Tuple = tokenizer.vocab_size
_snake_case : List[Any] = len(UpperCamelCase )
self.assertNotEqual(UpperCamelCase , 0 )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , len(UpperCamelCase ) )
self.assertEqual(UpperCamelCase , all_size + len(UpperCamelCase ) )
_snake_case : List[Any] = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=UpperCamelCase )
self.assertGreaterEqual(len(UpperCamelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_snake_case : Dict = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
_snake_case : Dict = tokenizer.add_special_tokens(UpperCamelCase )
_snake_case : int = tokenizer.vocab_size
_snake_case : Tuple = len(UpperCamelCase )
self.assertNotEqual(UpperCamelCase , 0 )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , len(UpperCamelCase ) )
self.assertEqual(UpperCamelCase , all_size_a + len(UpperCamelCase ) )
_snake_case : str = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=UpperCamelCase )
self.assertGreaterEqual(len(UpperCamelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.get_tokenizer()
_snake_case : Union[str, Any] = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(UpperCamelCase , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
_snake_case : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCamelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
_snake_case : List[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase )
# fmt: off
self.assertListEqual(UpperCamelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_snake_case : Dict = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
_snake_case : int = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='microsoft/speecht5_asr', revision='c5ef64c71905caeccde0e4462ef3f9077224c524', sequences=sequences, )
| 411
| 0
|
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    # zero-pad both operands so the digits line up column by column
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 711
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
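# Illustrative example (hypothetical tool): for a tool declaring
# inputs = ["text", "image"], create_inputs returns something like
# ["Text input", <512x512 PIL.Image>], ready to be splatted into tool(*inputs).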
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
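# Sketch of the wrapping step above: AGENT_TYPE_MAPPING pairs each declared type
# with its agent class ("text" -> AgentText, "image" -> AgentImage,
# "audio" -> AgentAudio), so raw values are boxed into agent types before the
# tool is called again.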
| 683
| 0
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel Pytorch model instance to be converted
        ckpt_dir: Tensorflow model directory
        model_name: model name
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        # map a PyTorch state_dict key onto the TF checkpoint variable name
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
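# Why the transpose above is needed: torch.nn.Linear stores its weight as
# (out_features, in_features), while a TF v1 dense kernel is laid out as
# (in_features, out_features), so any weight whose name matches
# `tensors_to_transpose` must be flipped before export.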
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
| 668
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
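# Note on the pattern above: gather_for_metrics drops the duplicate samples that
# distributed samplers pad onto the final batch, which is why len(logits) is
# expected to equal num_samples exactly even for lengths such as 99 that do not
# divide evenly across processes.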
| 668
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
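# Minimal usage sketch (names as defined above): the defaults reproduce the
# wav2vec2-base architecture, and inputs_to_logits_ratio is the total stride of
# the convolutional feature encoder.
#
#     config = Wav2Vec2Config()
#     assert config.inputs_to_logits_ratio == 320  # 5 * 2 * 2 * 2 * 2 * 2 * 2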
| 544
|
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the decorated function with a single key code so it can be registered by the handler
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the decorated function with several key codes so it can be registered by the handler
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        # collect every attribute that was marked with a key and register it
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        "Finds and returns the selected character if it exists in the handler"
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register_to_handler(cls):
    """Adds the KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
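# Hypothetical usage sketch (not part of this module): decorate methods with
# @mark("j") or @mark_multiple("k", "l"), then wrap the class with
# register_to_handler so that handle_input() dispatches the pressed key to the
# matching method.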
| 544
| 1
|
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test only evaluates a small batch, but it is enough to
        # detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
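# Note on the thresholds above: each pair's second value is a minimum BLEU
# score, so the assertion guards against output-quality regressions rather than
# pinning exact scores, which would be brittle across hardware and precisions.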
| 444
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
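    # Replacing the module object in sys.modules with a _LazyModule defers the
    # heavy torch/vision imports until an attribute such as OwlViTModel is
    # first accessed.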
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 444
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    # We will verify our results on an image of cute cats
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
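# Note on the last test above: post_process_semantic_segmentation upsamples the
# (1, num_labels, 128, 128) logits to each requested target size (here 500x300)
# and argmaxes over the label dimension, which is why the returned maps are 2-D.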
| 721
|
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    """Interpolation search: the collection must be ascending sorted,
    otherwise the result is unpredictable."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive interpolation search; start with left=0 and
    right=len(sorted_collection) - 1. The collection must be ascending sorted."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raise ValueError if the collection is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(f"{target} found at positions: {result}")
        else:
            print("Not found")
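# Worked probe example (values from the debug block above): searching for 67 in
# [10, 30, 40, 45, 50, 66, 77, 93] first probes
# point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 4; since collection[4] = 50 < 67,
# the search continues in the right half with left = 5.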
| 564
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 276
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # the mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
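# Illustrative note: because RoBERTa's byte-level BPE treats a leading space as
# part of a word, "Hello" and " Hello" encode to different ids;
# add_prefix_space=True makes pretokenized words behave like mid-sentence words,
# which is why the asserts above require it for is_split_into_words=True.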
| 276
| 1
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = MobileBertTokenizer
_UpperCAmelCase = MobileBertTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = filter_non_english
_UpperCAmelCase = 'google/mobilebert-uncased'
def snake_case ( self : int ):
super().setUp()
lowerCamelCase :Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase :List[str] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def snake_case ( self : str , __snake_case : List[Any] ):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
return input_text, output_text
def snake_case ( self : List[str] ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
def snake_case ( self : str ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def snake_case ( self : Optional[int] ):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
    def snake_case ( self : Dict ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def snake_case ( self : List[str] ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
    def snake_case ( self : str ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def snake_case ( self : str ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def snake_case ( self : int ):
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def snake_case ( self : Optional[Any] ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def snake_case ( self : List[Any] ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def snake_case ( self : Dict ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
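    # WordPiece is a greedy longest-match-first subword algorithm: continuation
    # pieces carry a "##" prefix, and a word containing any out-of-vocabulary
    # piece collapses entirely to the unknown token.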
    def snake_case ( self : Optional[Any] ):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def snake_case ( self : str ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def snake_case ( self : Optional[int] ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def snake_case ( self : List[Any] ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def snake_case ( self : Optional[Any] ):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def snake_case ( self : int ):
        tokenizer = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
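    # The offset-mapping check below verifies that the fast tokenizer reports,
    # for every produced token, the (start, end) character span it came from in
    # the raw sentence, with special tokens mapped to the empty (0, 0) span.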
def snake_case ( self : Dict ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
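    # BERT-style tokenizers treat every CJK character as its own word, so with
    # tokenize_chinese_chars=True no "##" continuation prefix should appear.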
    def snake_case ( self : Any ):
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''''''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 717
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
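# Lazy import structure for the LayoutLMv2 package: submodules are declared up
# front and only imported on first attribute access, with optional-dependency
# guards for the tokenizers, vision and torch backends.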
A__ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
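# Tests for the MGP-STR (scene-text recognition) tokenizer, a plain
# character-level tokenizer over [GO], [s], digits and lowercase letters.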
@require_tokenizers
class _a ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'tester'
SCREAMING_SNAKE_CASE : List[Any] = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                special_token = '[SPECIAL_TOKEN]'
                tokenizer.add_special_tokens({'cls_token': special_token} )
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ), 1 )
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
def UpperCamelCase_ ( self ):
'''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text, add_special_tokens=False )
                self.assertListEqual(ids, ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ), 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a, str )
                self.assertEqual(text_a.replace(' ', '' ), output_text )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
| 28
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
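# Test suite for MobileViT: config sanity checks, a model tester that builds a
# tiny config with small spatial sizes, and slow integration tests comparing
# logits of the released apple/mobilevit checkpoints against recorded values.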
class MobileViTConfigTester ( ConfigTester ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config, 'hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config, 'neck_hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config, 'num_attention_heads' ) )
class MobileViTModelTester :
'''simple docstring'''
    def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def UpperCamelCase_ ( self ):
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
        config = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
    def UpperCamelCase_ ( self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        model = MobileViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
    def UpperCamelCase_ ( self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def UpperCamelCase_ ( self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
        result = model(pixel_values, labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
        self.model_tester = MobileViTModelTester(self )
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCamelCase_ ( self ):
'''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ), expected_num_stages )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict, config, model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class )
def UpperCamelCase_ ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def UpperCamelCase_ ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def lowercase__( ):
"""simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
        model = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape, expected_shape )
        expected_slice = torch.tensor(
            [
                [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
                [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
                [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape, expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape, expected_shape )
| 28
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
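# BiT (Big Transfer) backbone configuration: validates the residual layer type
# and the global padding strategy before storing the architectural
# hyperparameters, and registers stage names for backbone feature extraction.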
class A( BackboneConfigMixin , PretrainedConfig ):
'''simple docstring'''
    model_type = '''bit'''
    layer_types = ['''preactivation''', '''bottleneck''']
    supported_padding = ['''SAME''', '''VALID''']
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="preactivation" , hidden_act="relu" , global_padding=None , num_groups=32 , drop_path_rate=0.0 , embedding_dynamic_padding=False , output_stride=32 , width_factor=1 , out_features=None , out_indices=None , **kwargs , ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"""Padding strategy {global_padding} not supported""" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 651
|
from manim import *
class A( Scene ):
'''simple docstring'''
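    # Scene illustrating disk/CPU offload during big-model inference: as the
    # input square moves through the model blocks, each block's weights are
    # animated hopping from CPU to GPU just in time and back out afterwards.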
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('CPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(4 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('GPU' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Model' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
lowerCamelCase_ = []
lowerCamelCase_ = []
for i, rect in enumerate(A_ ):
lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 )
target.move_to(A_ )
model_arr.append(A_ )
lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
lowerCamelCase_ = Text('Disk' , font_size=24 )
lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4, -1.25, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
lowerCamelCase_ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
lowerCamelCase_ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ) )
lowerCamelCase_ = Square(0.3 )
input.set_fill(A_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A_ , buff=0.5 )
self.play(Write(A_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 )
self.play(MoveToTarget(A_ ) )
self.play(FadeOut(A_ ) )
lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , A_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCamelCase_ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(A_ ) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCamelCase_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCamelCase_ = AnimationGroup(
FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCamelCase_ = 0.7
self.play(
Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCamelCase_ = a_c
lowerCamelCase_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , )
lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) )
self.wait()
| 651
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
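# The MT5 tokenizers are plain aliases of the T5 ones, so they are imported
# eagerly above; everything else in this package is wired lazily below.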
_UpperCAmelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
_UpperCAmelCase = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 699
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
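# Mapping from fairseq parameter names (keys) to the corresponding modules in
# the Hugging Face UniSpeechSat implementation (values); "*" stands in for the
# transformer layer index.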
_UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
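# Copies one fairseq tensor onto the HF module found by walking `key`
# attribute-by-attribute, after checking that the shapes agree.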
def set_recursively (hf_pointer, key, value, full_name, weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights (fairseq_model, hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*', layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer (full_name, value, feature_extractor, unused_weights, use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint (checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 699
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
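# SpeechT5Processor bundles the feature extractor (audio) and the tokenizer
# (text) behind one object; __call__ routes inputs/targets to whichever of the
# two applies, and pad() mirrors that routing for batched padding.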
class SCREAMING_SNAKE_CASE__ ( ProcessorMixin ):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , *args , **kwargs ):
lowerCamelCase__ = kwargs.pop("""audio""" , _SCREAMING_SNAKE_CASE )
lowerCamelCase__ = kwargs.pop("""text""" , _SCREAMING_SNAKE_CASE )
lowerCamelCase__ = kwargs.pop("""text_target""" , _SCREAMING_SNAKE_CASE )
lowerCamelCase__ = kwargs.pop("""audio_target""" , _SCREAMING_SNAKE_CASE )
lowerCamelCase__ = kwargs.pop("""sampling_rate""" , _SCREAMING_SNAKE_CASE )
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        elif text is not None:
            inputs = self.tokenizer(text , **kwargs )
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs )
            labels = targets["""input_values"""]
        elif text_target is not None:
            targets = self.tokenizer(text_target , **kwargs )
            labels = targets["""input_ids"""]
        else:
            targets = None
if inputs is None:
return targets
        if targets is not None:
            inputs["""labels"""] = labels
            decoder_attention_mask = targets.get("""attention_mask""" )
            if decoder_attention_mask is not None:
                inputs["""decoder_attention_mask"""] = decoder_attention_mask
return inputs
    def pad( self , *args , **kwargs ):
lowerCamelCase__ = kwargs.pop("""input_values""" , _SCREAMING_SNAKE_CASE )
lowerCamelCase__ = kwargs.pop("""input_ids""" , _SCREAMING_SNAKE_CASE )
lowerCamelCase__ = kwargs.pop("""labels""" , _SCREAMING_SNAKE_CASE )
if input_values is not None and input_ids is not None:
raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"""You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs )
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs )
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list ) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs )
                labels = targets["""input_ids"""]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs )
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["""input_values"""]
        else:
            targets = None
if inputs is None:
return targets
        if targets is not None:
            inputs["""labels"""] = labels
            decoder_attention_mask = targets.get("""attention_mask""" )
            if decoder_attention_mask is not None:
                inputs["""decoder_attention_mask"""] = decoder_attention_mask
return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
| 713
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__magic_name__ = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 258
| 0
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("""T""")
def get_parent_position ( position : int ) -> int:
    return (position - 1) // 2
def get_child_left_position ( position : int ) -> int:
    return (2 * position) + 1
def get_child_right_position ( position : int ) -> int:
    return (2 * position) + 2
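# Array-backed binary min-heap with a position map, so that update_key (the
# decrease-key operation Prim's algorithm needs) runs in O(log n).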
class MinPriorityQueue ( Generic[T] ):
    """simple docstring"""
    def __init__( self) -> None:
        '''simple docstring'''
        self.heap : list[tuple[T, int]] = []
        self.position_map : dict[T, int] = {}
        self.elements : int = 0
    def __len__( self) -> int:
        '''simple docstring'''
        return self.elements
    def __repr__( self) -> str:
        '''simple docstring'''
        return str(self.heap)
    def is_empty ( self) -> bool:
        '''simple docstring'''
        return self.elements == 0
    def push ( self , elem , weight) -> None:
        '''simple docstring'''
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)
    def extract_min ( self) -> T:
        '''simple docstring'''
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1)
        elem , _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem , _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem
    def update_key ( self , elem , weight) -> None:
        '''simple docstring'''
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _ , parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)
    def _bubble_up ( self , elem) -> None:
        '''simple docstring'''
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _ , weight = self.heap[curr_pos]
        _ , parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position , curr_pos)
            return self._bubble_up(elem)
        return None
    def _bubble_down ( self , elem) -> None:
        '''simple docstring'''
        curr_pos = self.position_map[elem]
        _ , weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position , curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos)
                return self._bubble_down(elem)
        return None
    def _swap_nodes ( self , nodea_pos , nodeb_pos) -> None:
        '''simple docstring'''
        nodea_elem = self.heap[nodea_pos][0]
        nodeb_elem = self.heap[nodeb_pos][0]
        self.heap[nodea_pos] , self.heap[nodeb_pos] = (
            self.heap[nodeb_pos],
            self.heap[nodea_pos],
        )
        self.position_map[nodea_elem] = nodeb_pos
        self.position_map[nodeb_elem] = nodea_pos
class GraphUndirectedWeighted ( Generic[T] ):
    """simple docstring"""
    def __init__( self) -> None:
        '''simple docstring'''
        self.connections : dict[T, dict[T, int]] = {}
        self.nodes : int = 0
    def __repr__( self) -> str:
        '''simple docstring'''
        return str(self.connections)
    def __len__( self) -> int:
        '''simple docstring'''
        return self.nodes
    def add_node ( self , node) -> None:
        '''simple docstring'''
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge ( self , nodea , nodeb , weight) -> None:
        '''simple docstring'''
        self.add_node(nodea)
        self.add_node(nodeb)
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight
def prims_algo ( graph , ) -> tuple[dict, dict]:
    dist : dict[T, int] = {node: maxsize for node in graph.connections}
    parent : dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue : MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node , weight )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour] )
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour] )
                parent[neighbour] = node
    return dist, parent
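# Minimal usage sketch (hypothetical graph, not part of the original module):
#   graph: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
#   graph.add_edge("a", "b", 3)
#   graph.add_edge("b", "c", 10)
#   graph.add_edge("a", "c", 15)
#   dist, parent = prims_algo(graph)  # parent encodes the minimum spanning tree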
| 302
|
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers ( max_number : int ):
    """simple docstring"""
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution ( base : int = 800800 , degree : int = 800800 ):
    """simple docstring"""
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
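# Editor's note (hedged: hand-computed, not from the source): a "hybrid integer" is
# p**q * q**p for primes p < q; taking log2 lets the code compare
# q*log2(p) + p*log2(q) against degree*log2(base) without building huge integers.
# For example 2**3 * 3**2 = 72, so solution(base=72, degree=1) should count exactly
# the single pair (2, 3), up to floating-point rounding right at the boundary.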
| 88
| 0
|
'''simple docstring'''
def nand_gate( input_a : int , input_b : int ):
    return int((input_a, input_b).count(0 ) != 0 )
def test_nand_gate( ):
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 701
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
    '''simple docstring'''
    forward_kwargs = {}
    preprocess_params = {}
    if prompt is not None:
        preprocess_params["prompt"] = prompt
    if generate_kwargs is not None:
        forward_kwargs["generate_kwargs"] = generate_kwargs
    if max_new_tokens is not None:
        if "generate_kwargs" not in forward_kwargs:
            forward_kwargs["generate_kwargs"] = {}
        if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
            raise ValueError(
                """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                """ please use only one""" )
        forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
    return preprocess_params, forward_kwargs, {}
def __call__( self , images , **kwargs ):
    '''simple docstring'''
    return super().__call__(images , **kwargs )
def preprocess( self , image , prompt=None ):
    '''simple docstring'''
    image = load_image(image )
    if prompt is not None:
        if not isinstance(prompt , str ):
            raise ValueError(
                F"""Received an invalid text input, got - {type(prompt )} - but expected a single string. """
                """Note also that one single text can be provided for conditional image to text generation.""" )
        model_type = self.model.config.model_type
        if model_type == "git":
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
            input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
            input_ids = [self.tokenizer.cls_token_id] + input_ids
            input_ids = torch.tensor(input_ids ).unsqueeze(0 )
            model_inputs.update({"""input_ids""": input_ids} )
        elif model_type == "pix2struct":
            model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
        elif model_type != "vision-encoder-decoder":
            # vision-encoder-decoder does not support conditional generation
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
            text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
            model_inputs.update(text_inputs )
        else:
            raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
    else:
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
    if self.model.config.model_type == "git" and prompt is None:
        model_inputs["input_ids"] = None
    return model_inputs
def _forward( self , model_inputs , generate_kwargs=None ):
    '''simple docstring'''
    if (
        "input_ids" in model_inputs
        and isinstance(model_inputs["""input_ids"""] , list )
        and all(x is None for x in model_inputs["""input_ids"""] )
    ):
        model_inputs["input_ids"] = None
    if generate_kwargs is None:
        generate_kwargs = {}
    # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
    # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
    # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
    # in the `_prepare_model_inputs` method.
    inputs = model_inputs.pop(self.model.main_input_name )
    model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
    return model_outputs
def postprocess( self , model_outputs ):
    '''simple docstring'''
    records = []
    for output_ids in model_outputs:
        record = {
            """generated_text""": self.tokenizer.decode(
                output_ids , skip_special_tokens=True , )
        }
        records.append(record )
    return records
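# Editor's note: typical use of this pipeline through the public `transformers` API
# (a sketch; the checkpoint name is only an example, output text will vary):
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("parrots.png")
#   # -> [{'generated_text': '...'}]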
| 653
| 0
|
"""simple docstring"""
def solution( length : int = 5_0 ) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 174
|
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
lowerCAmelCase = """base_with_context"""
def load_notes_encoder( weights , model ):
lowerCamelCase__ : Any =nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
lowerCamelCase__ : int =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case_ )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__ : Union[str, Any] =weights[f"""layers_{lyr_num}"""]
lowerCamelCase__ : Any =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
lowerCamelCase__ : List[Any] =ly_weight['attention']
lowerCamelCase__ : Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ : str =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ : List[str] =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ : Tuple =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase__ : Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase__ : List[str] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder( weights , model ):
lowerCamelCase__ : Tuple =nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
lowerCamelCase__ : Optional[int] =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case_ )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__ : List[Any] =weights[f"""layers_{lyr_num}"""]
lowerCamelCase__ : List[str] =ly_weight['attention']
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ : Optional[int] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ : Optional[int] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
lowerCamelCase__ : Dict =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase__ : List[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase__ : str =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase__ : Dict =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder( weights , model ):
lowerCamelCase__ : List[str] =nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
lowerCamelCase__ : Any =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case_ )
lowerCamelCase__ : Tuple =nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowerCamelCase__ : Tuple =weights[f"""layers_{lyr_num}"""]
lowerCamelCase__ : List[str] =nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
lowerCamelCase__ : Optional[int] =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
lowerCamelCase__ : Union[str, Any] =ly_weight['self_attention']
lowerCamelCase__ : Optional[int] =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ : List[Any] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ : Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ : int =ly_weight['MultiHeadDotProductAttention_0']
lowerCamelCase__ : Any =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ : List[str] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ : List[str] =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ : Tuple =nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase__ : Any =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase__ : List[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
lowerCamelCase__ : List[str] =nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main( args ) -> None:
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
'from __gin__ import dynamic_registration',
'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
]
    gin_file = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    decoder = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint['target']['decoder'] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
args = parser.parse_args()
main(args)
| 174
| 1
|
'''simple docstring'''
import functools
from typing import Any
def word_break( string : str , words : list[str] ):
    '''simple docstring'''
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError('''the string should be not empty string''' )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError('''the words should be a list of non-empty strings''' )
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = """WORD_KEEPER"""
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index : int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
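# Quick doctest-style check of the trie + memoised DP above (hand-computed):
#   >>> word_break("applepenapple", ["apple", "pen"])
#   True
#   >>> word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])
#   False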
| 713
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix : NDArray[float64], constant_matrix : NDArray[float64], init_val : list[float], iterations : int, ):
    '''simple docstring'''
    rowsa, colsa = coefficient_matrix.shape
    rowsb, colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
        raise ValueError(msg )
    if colsb != 1:
        msg = f"Constant matrix must be nx1 but received {rowsb}x{colsb}"
        raise ValueError(msg )
    if rowsa != rowsb:
        msg = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            f"received {rowsa}x{colsa} and {rowsb}x{colsb}"
        )
        raise ValueError(msg )
    if len(init_val ) != rowsa:
        msg = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            f"matrix but received {len(init_val )} and {rowsa}"
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''' )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant( table : NDArray[float64] ):
    '''simple docstring'''
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows ):
        total = 0
        for j in range(0, cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
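# Usage sketch (illustrative values; exact iterates depend on the system chosen):
#   import numpy as np
#   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   constant = np.array([[2.0], [-6.0], [-4.0]])
#   jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3)
# Each sweep recomputes every unknown from the previous iterate; convergence is
# guaranteed here only because the matrix is strictly diagonally dominant.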
| 665
| 0
|
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_snake_case : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_a )
class A ( _a ):
def __init__( self : Any , **lowerCAmelCase_ : Dict ) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
requires_backends(self , '''vision''' )
requires_backends(self , '''torch''' )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(lowerCAmelCase_ )
def _sanitize_parameters( self , **kwargs ):
    """simple docstring"""
    preprocess_kwargs = {}
    forward_params = {}
    postprocess_kwargs = {}
    # preprocess args
    if "points_per_batch" in kwargs:
        preprocess_kwargs["points_per_batch"] = kwargs['''points_per_batch''']
    if "points_per_crop" in kwargs:
        preprocess_kwargs["points_per_crop"] = kwargs['''points_per_crop''']
    if "crops_n_layers" in kwargs:
        preprocess_kwargs["crops_n_layers"] = kwargs['''crops_n_layers''']
    if "crop_overlap_ratio" in kwargs:
        preprocess_kwargs["crop_overlap_ratio"] = kwargs['''crop_overlap_ratio''']
    if "crop_n_points_downscale_factor" in kwargs:
        preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs['''crop_n_points_downscale_factor''']
    # postprocess args
    if "pred_iou_thresh" in kwargs:
        forward_params["pred_iou_thresh"] = kwargs['''pred_iou_thresh''']
    if "stability_score_offset" in kwargs:
        forward_params["stability_score_offset"] = kwargs['''stability_score_offset''']
    if "mask_threshold" in kwargs:
        forward_params["mask_threshold"] = kwargs['''mask_threshold''']
    if "stability_score_thresh" in kwargs:
        forward_params["stability_score_thresh"] = kwargs['''stability_score_thresh''']
    if "crops_nms_thresh" in kwargs:
        postprocess_kwargs["crops_nms_thresh"] = kwargs['''crops_nms_thresh''']
    if "output_rle_mask" in kwargs:
        postprocess_kwargs["output_rle_mask"] = kwargs['''output_rle_mask''']
    if "output_bboxes_mask" in kwargs:
        postprocess_kwargs["output_bboxes_mask"] = kwargs['''output_bboxes_mask''']
    return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Dict , lowerCAmelCase_ : Optional[Any] , *lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str=None , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return super().__call__(lowerCAmelCase_ , *lowerCAmelCase_ , num_workers=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , **lowerCAmelCase_ )
def preprocess( self , image , points_per_batch=64 , crops_n_layers : int = 0 , crop_overlap_ratio : float = 5_12 / 15_00 , points_per_crop : Optional[int] = 32 , crop_n_points_downscale_factor : Optional[int] = 1 , ):
    """simple docstring"""
    image = load_image(image )
    target_size = self.image_processor.size['''longest_edge''']
    crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
        image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
    model_inputs = self.image_processor(images=cropped_images , return_tensors='''pt''' )
    with self.device_placement():
        if self.framework == "pt":
            inference_context = self.get_inference_context()
            with inference_context():
                model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                image_embeddings = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''' ) )
                model_inputs["image_embeddings"] = image_embeddings
    n_points = grid_points.shape[1]
    points_per_batch = points_per_batch if points_per_batch is not None else n_points
    if points_per_batch <= 0:
        raise ValueError(
            '''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
            '''To return all points at once, set points_per_batch to None''' )
    for i in range(0 , n_points , points_per_batch ):
        batched_points = grid_points[:, i : i + points_per_batch, :, :]
        labels = input_labels[:, i : i + points_per_batch]
        is_last = i == n_points - points_per_batch
        yield {
            "input_points": batched_points,
            "input_labels": labels,
            "input_boxes": crop_boxes,
            "is_last": is_last,
            **model_inputs,
        }
def _forward( self , model_inputs , pred_iou_thresh=0.8_8 , stability_score_thresh=0.9_5 , mask_threshold=0 , stability_score_offset=1 , ):
    """simple docstring"""
    input_boxes = model_inputs.pop('''input_boxes''' )
    is_last = model_inputs.pop('''is_last''' )
    original_sizes = model_inputs.pop('''original_sizes''' ).tolist()
    reshaped_input_sizes = model_inputs.pop('''reshaped_input_sizes''' ).tolist()
    model_outputs = self.model(**model_inputs )
    # post processing happens here in order to avoid CPU GPU copies of ALL the masks
    low_resolution_masks = model_outputs['''pred_masks''']
    masks = self.image_processor.post_process_masks(
        low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
    iou_scores = model_outputs['''iou_scores''']
    masks , iou_scores , boxes = self.image_processor.filter_masks(
        masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def postprocess( self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
    """simple docstring"""
    all_scores = []
    all_masks = []
    all_boxes = []
    for model_output in model_outputs:
        all_scores.append(model_output.pop('''iou_scores''' ) )
        all_masks.extend(model_output.pop('''masks''' ) )
        all_boxes.append(model_output.pop('''boxes''' ) )
    all_scores = torch.cat(all_scores )
    all_boxes = torch.cat(all_boxes )
    output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
        all_masks , all_scores , all_boxes , crops_nms_thresh )
    extra = defaultdict(list )
    for output in model_outputs:
        for k, v in output.items():
            extra[k].append(v )
    optional = {}
    if output_rle_mask:
        optional["rle_mask"] = rle_mask
    if output_bboxes_mask:
        optional["bounding_boxes"] = bounding_boxes
    return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 22
|
'''simple docstring'''
import requests
def send_slack_message(message_body : str , slack_url : str ):
    '''simple docstring'''
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url , json={'''text''': message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            '''Request to slack returned an error '''
            f'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(msg )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 22
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 713
|
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 9_9999_9999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 9_9999_9999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int] , turn_around_time: list[int] , no_of_processes: int ) -> None:
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(F'Average waiting time = {total_waiting_time / no_of_processes:.5f}' )
    print("Average turn around time =" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
    print('Enter how many process you want to analyze')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process:--' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            'Process',
            'BurstTime',
            'ArrivalTime',
            'WaitingTime',
            'TurnAroundTime',
        ],
    )
    # Printing the dataFrame
    pd.set_option('display.max_rows', fcfs.shape[0] + 1)
    print(fcfs)
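# Hand-checked example of the shortest-remaining-time-first logic above (hedged:
# traced manually, not taken from the source):
#   calculate_waitingtime([0, 1, 2], [3, 1, 2], 3) == [1, 0, 2]
# P2 preempts P1 at t=1 (remaining 1 < 2), P1 resumes at t=2, P3 runs last.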
| 467
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = '''gpt_neo'''
__UpperCAmelCase : Optional[int] = ['''past_key_values''']
__UpperCAmelCase : Optional[int] = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self ,vocab_size=5_0257 ,max_position_embeddings=2048 ,hidden_size=2048 ,num_layers=24 ,attention_types=[[["global", "local"], 12]] ,num_heads=16 ,intermediate_size=None ,window_size=256 ,activation_function="gelu_new" ,resid_dropout=0.0 ,embed_dropout=0.0 ,attention_dropout=0.0 ,classifier_dropout=0.1 ,layer_norm_epsilon=1E-5 ,initializer_range=0.02 ,use_cache=True ,bos_token_id=5_0256 ,eos_token_id=5_0256 ,**kwargs ,):
    '''simple docstring'''
    self.vocab_size = vocab_size
    self.max_position_embeddings = max_position_embeddings
    self.hidden_size = hidden_size
    self.num_layers = num_layers
    self.num_heads = num_heads
    self.intermediate_size = intermediate_size
    self.window_size = window_size
    self.activation_function = activation_function
    self.resid_dropout = resid_dropout
    self.embed_dropout = embed_dropout
    self.attention_dropout = attention_dropout
    self.classifier_dropout = classifier_dropout
    self.layer_norm_epsilon = layer_norm_epsilon
    self.initializer_range = initializer_range
    self.use_cache = use_cache
    self.bos_token_id = bos_token_id
    self.eos_token_id = eos_token_id
    self.attention_types = attention_types
    self.attention_layers = self.expand_attention_types_params(attention_types )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
F"""`config.num_layers = {self.num_layers}`. """
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
    super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
@staticmethod
def expand_attention_types_params( attention_types ):
    '''simple docstring'''
    attentions = []
    for item in attention_types:
        for _ in range(item[1] ):
            attentions.extend(item[0] )
    return attentions
def custom_unfold(input , dimension , size , step ):
    """simple docstring"""
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='floor' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks(seq_length , window_size ):
    """simple docstring"""
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='floor' )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
@property
def inputs( self ):
    '''simple docstring'''
    common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
    if self.use_past:
        self.fill_with_past_key_values_(common_inputs ,direction='inputs' )
        common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
    else:
        common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def num_attention_heads( self ):
'''simple docstring'''
return self._config.num_heads
def generate_dummy_inputs( self ,tokenizer : PreTrainedTokenizer ,batch_size : int = -1 ,seq_length : int = -1 ,is_pair : bool = False ,framework : Optional[TensorType] = None ,):
    '''simple docstring'''
    common_inputs = super(OnnxConfigWithPast ,self ).generate_dummy_inputs(
        tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
    # We need to order the input in the way they appears in the forward()
    ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
    # Need to add the past_keys
    if self.use_past:
        if not is_torch_available():
            raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
        else:
            import torch
            batch , seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            past_shape = (
                batch,
                self.num_attention_heads,
                past_key_values_length,
                self._config.hidden_size // self.num_attention_heads,
            )
            ordered_inputs['past_key_values'] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
            ]
    ordered_inputs['attention_mask'] = common_inputs['attention_mask']
    if self.use_past:
        mask_dtype = ordered_inputs['attention_mask'].dtype
        ordered_inputs['attention_mask'] = torch.cat(
            [ordered_inputs['attention_mask'], torch.ones(batch ,past_key_values_length ,dtype=mask_dtype )] ,dim=1 )
    return ordered_inputs
@property
def default_onnx_opset( self ):
'''simple docstring'''
return 13
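# Editor's note: the class behind this record is transformers' GPTNeoConfig; a
# minimal sketch of the attention_types expansion restored above:
#   from transformers import GPTNeoConfig
#   config = GPTNeoConfig(num_layers=4, attention_types=[[["global", "local"], 2]])
#   config.attention_layers  # ['global', 'local', 'global', 'local']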
| 229
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCAmelCase_ ():
"""simple docstring"""
raise RuntimeError('CUDA out of memory.' )
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self ):
    '''simple docstring'''
    super().__init__()
    self.linear1 = nn.Linear(3 ,4 )
    self.batchnorm = nn.BatchNorm1d(4 )
    self.linear2 = nn.Linear(4 ,5 )
def forward( self ,x ):
    '''simple docstring'''
    return self.linear2(self.batchnorm(self.linear1(x ) ) )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : List[Any] ):
'''simple docstring'''
batch_sizes = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(batch_size ):
    nonlocal batch_sizes
    batch_sizes.append(batch_size )
    if batch_size != 8:
        raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(batch_sizes ,[128, 64, 32, 16, 8] )
def __lowercase ( self : Dict ):
'''simple docstring'''
batch_sizes = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(batch_size ,arga ):
    nonlocal batch_sizes
    batch_sizes.append(batch_size )
    if batch_size != 8:
        raise_fake_out_of_memory()
    return batch_size, arga
bs , arga = mock_training_loop_function('hello' )
self.assertListEqual(batch_sizes ,[128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] ,[8, 'hello'] )
def __lowercase ( self : int ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(_a : Optional[Any] ):
pass
with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' ,cm.exception.args[0] )
def __lowercase ( self : Dict ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(batch_size ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' ,cm.exception.args[0] )
def __lowercase ( self : Dict ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(batch_size ,arg1 ,arg2 ):
    if batch_size != 8:
        raise_fake_out_of_memory()
with self.assertRaises(TypeError ) as cm:
    mock_training_loop_function(128 ,'hello' ,'world' )
self.assertIn('Batch size was passed into `f`' ,cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' ,cm.exception.args[0] )
def __lowercase ( self : str ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(batch_size ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(ValueError ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' ,cm.exception.args[0] )
@require_cuda
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
starting_memory = torch.cuda.memory_allocated()
model = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() ,starting_memory )
model = release_memory(model )
self.assertEqual(torch.cuda.memory_allocated() ,starting_memory )
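# Editor's note: how the decorator under test is used in real training code (a sketch):
#   from accelerate.utils import find_executable_batch_size
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size`; on CUDA OOM the wrapper retries
#            # with the batch size halved until the loop fits in memory
#   train()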
| 229
| 1
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__UpperCAmelCase : List[Any] = logging.get_logger(__name__)
class UpperCAmelCase_ ( UpperCamelCase_):
'''simple docstring'''
def __init__( self , *args , **kwargs ):
    """simple docstring"""
    warnings.warn(
        '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
        ''' Please use PerceiverImageProcessor instead.''' , FutureWarning , )
    super().__init__(*args , **kwargs )
| 717
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Any = ["input_features"]
def __init__( self , feature_size=80 , sampling_rate=16_000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs , ):
    """simple docstring"""
    super().__init__(
        feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
    self.n_fft = n_fft
    self.hop_length = hop_length
    self.chunk_length = chunk_length
    self.n_samples = chunk_length * sampling_rate
    self.nb_max_frames = self.n_samples // hop_length
    self.sampling_rate = sampling_rate
    self.mel_filters = mel_filter_bank(
        num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
def _np_extract_fbank_features( self , waveform ):
    """simple docstring"""
    log_spec = spectrogram(
        waveform , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
    log_spec = log_spec[:, :-1]
    log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
    log_spec = (log_spec + 4.0) / 4.0
    return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def zero_mean_unit_var_norm( input_values , attention_mask , padding_value = 0.0 ):
    """simple docstring"""
    if attention_mask is not None:
        attention_mask = np.array(attention_mask , np.int32 )
        normed_input_values = []
        for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
            normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
            if length < normed_slice.shape[0]:
                normed_slice[length:] = padding_value
            normed_input_values.append(normed_slice )
    else:
        normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
    return normed_input_values
def __call__( self , raw_speech , truncation = True , pad_to_multiple_of = None , return_tensors = None , return_attention_mask = None , padding = "max_length" , max_length = None , sampling_rate = None , do_normalize = None , **kwargs , ):
    """simple docstring"""
    if sampling_rate is not None:
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
    else:
        logger.warning(
            '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
            '''Failing to do so can result in silent errors that might be hard to debug.''' )
    is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
    if is_batched_numpy and len(raw_speech.shape ) > 2:
        raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
    is_batched = is_batched_numpy or (
        isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
    )
    if is_batched:
        raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
    elif not is_batched and not isinstance(raw_speech , np.ndarray ):
        raw_speech = np.asarray(raw_speech , dtype=np.float32 )
    elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
        raw_speech = raw_speech.astype(np.float32 )
    # always return batch
    if not is_batched:
        raw_speech = [np.asarray([raw_speech] ).T]
    batched_speech = BatchFeature({'''input_features''': raw_speech} )
    # convert into correct format for padding
    padded_inputs = self.pad(
        batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
    # zero-mean and unit-variance normalization
    if do_normalize:
        padded_inputs['''input_features'''] = self.zero_mean_unit_var_norm(
            padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
        padded_inputs['''input_features'''] = np.stack(padded_inputs['''input_features'''] , axis=0 )
    # make sure list is in array format
    input_features = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
    input_features = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
    if isinstance(input_features[0] , list ):
        padded_inputs['''input_features'''] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
    else:
        padded_inputs['''input_features'''] = input_features
    if return_attention_mask:
        # rescale from sample (48000) to feature (3000)
        padded_inputs['''attention_mask'''] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
    if return_tensors is not None:
        padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
    return padded_inputs
def to_dict( self ):
    """simple docstring"""
    output = copy.deepcopy(self.__dict__ )
    output['''feature_extractor_type'''] = self.__class__.__name__
    if "mel_filters" in output:
        del output["mel_filters"]
    return output
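# Editor's note: typical usage of this feature extractor (a sketch; values illustrative):
#   import numpy as np
#   from transformers import WhisperFeatureExtractor
#   fe = WhisperFeatureExtractor()
#   audio = np.zeros(16_000, dtype=np.float32)  # 1 s of silence at 16 kHz
#   feats = fe(audio, sampling_rate=16_000, return_tensors="np")
#   feats["input_features"].shape  # (1, 80, 3000): 80 mel bins x 30 s of frames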
| 643
| 0
|
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 462
|
'''simple docstring'''
def is_sum_subset( arr : list[int] , required_sum : int ):
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
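# Quick doctest-style check of the DP table above (hand-computed):
#   >>> is_sum_subset([2, 4, 6, 8], 5)
#   False
#   >>> is_sum_subset([2, 4, 6, 8], 14)   # 2 + 4 + 8
#   True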
| 597
| 0
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config( config_path ,display=False ):
    '''simple docstring'''
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan( device ,conf_path=None ,ckpt_path=None ):
    '''simple docstring'''
    if conf_path is None:
        conf_path = """./model_checkpoints/vqgan_only.yaml"""
    config = load_config(conf_path ,display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = """./model_checkpoints/vqgan_only.pt"""
    sd = torch.load(ckpt_path ,map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["""state_dict"""]
    model.load_state_dict(sd ,strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan( x ,model ):
    '''simple docstring'''
    z , _ , _ = model.encode(x )
    print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str( string ,reload=False ):
    '''simple docstring'''
    module , cls = string.rsplit(""".""" ,1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module ,package=None ) ,cls )
def instantiate_from_config( config ):
    '''simple docstring'''
    if "target" not in config:
        raise KeyError("""Expected key `target` to instantiate.""" )
    return get_obj_from_str(config["""target"""] )(**config.get("""params""" ,{} ) )
def load_model_from_config( config ,sd ,gpu=True ,eval_mode=True ):
    '''simple docstring'''
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model( config ,ckpt ,gpu ,eval_mode ):
    '''simple docstring'''
    if ckpt:
        pl_sd = torch.load(ckpt ,map_location="""cpu""" )
        global_step = pl_sd["""global_step"""]
        print(f"""loaded model from global step {global_step}.""" )
    else:
        pl_sd = {"""state_dict""": None}
        global_step = None
    model = load_model_from_config(config.model ,pl_sd["""state_dict"""] ,gpu=gpu ,eval_mode=eval_mode )["""model"""]
    return model, global_step
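# Editor's example (hedged: the function names above were reconstructed, and the
# checkpoint paths are placeholders):
#   model = load_vqgan("cuda", conf_path="vqgan.yaml", ckpt_path="vqgan.ckpt")
#   xrec = reconstruct_with_vqgan(x, model)  # x: (B, 3, H, W) image batch in [-1, 1]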
| 720
|
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _UpperCAmelCase :
def __init__( self , config_file_or_dict ):
    if isinstance(config_file_or_dict , dict ):
        # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
        # modified it, it will not be accepted here again, since `auto` values would have been overridden
        config = deepcopy(config_file_or_dict )
    elif os.path.exists(config_file_or_dict ):
        with io.open(config_file_or_dict , """r""" , encoding="""utf-8""" ) as f:
            config = json.load(f )
    else:
        try:
            config_decoded = base64.urlsafe_b64decode(config_file_or_dict ).decode("""utf-8""" )
            config = json.loads(config_decoded )
        except (UnicodeDecodeError, AttributeError, ValueError):
            raise ValueError(
                F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
    self.config = config
    self.set_stage_and_offload()
def set_stage_and_offload( self ):
    # zero stage - this is done as early as possible, before model is created, to allow
    # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
    # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
    self._stage = self.get_value("""zero_optimization.stage""" , -1 )
    # offload
    self._offload = False
    if self.is_zero2() or self.is_zero3():
        offload_devices_valid = set(["""cpu""", """nvme"""] )
        offload_devices = set(
            [
                self.get_value("""zero_optimization.offload_optimizer.device""" ),
                self.get_value("""zero_optimization.offload_param.device""" ),
            ] )
        if len(offload_devices & offload_devices_valid ) > 0:
            self._offload = True
def find_config_node( self , ds_key_long ):
    config = self.config
    # find the config node of interest if it exists
    nodes = ds_key_long.split(""".""" )
    ds_key = nodes.pop()
    for node in nodes:
        config = config.get(node )
        if config is None:
            return None, ds_key
    return config, ds_key
def get_value( self , ds_key_long , default=None ):
    config , ds_key = self.find_config_node(ds_key_long )
    if config is None:
        return default
    return config.get(ds_key , default )
def del_config_sub_tree( self , ds_key_long , must_exist=False ):
    config = self.config
    # find the config node of interest if it exists
    nodes = ds_key_long.split(""".""" )
    for node in nodes:
        parent_config = config
        config = config.get(node )
        if config is None:
            if must_exist:
                raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" )
            else:
                return
    # if found remove it
    if parent_config is not None:
        parent_config.pop(node )
def is_true( self , ds_key_long ):
    value = self.get_value(ds_key_long )
    return False if value is None else bool(value )
def is_false( self , ds_key_long ):
    value = self.get_value(ds_key_long )
    return False if value is None else not bool(value )
def is_zero2( self ):
    return self._stage == 2
def is_zero3( self ):
    return self._stage == 3
def is_offload( self ):
    return self._offload
class _UpperCAmelCase :
def __init__( self , engine ):
    self.engine = engine
def backward( self , loss , **kwargs ):
    # runs backpropagation and handles mixed precision
    self.engine.backward(loss , **kwargs )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , optimizer ):
    super().__init__(optimizer , device_placement=False , scaler=None )
    self.__has_overflow__ = hasattr(self.optimizer , """overflow""" )
def zero_grad( self , set_to_none=None ):
    pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def step( self ):
    pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def step_was_skipped( self ):
    if self.__has_overflow__:
        return self.optimizer.overflow
    return False
class _UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , scheduler , optimizers ):
    super().__init__(scheduler , optimizers )
def step( self ):
    pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _UpperCAmelCase :
def __init__( self , params , lr=0.001 , weight_decay=0 , **kwargs ):
    self.params = params
    self.lr = lr
    self.weight_decay = weight_decay
    self.kwargs = kwargs
class _UpperCAmelCase :
def __init__( self , optimizer , total_num_steps=None , warmup_num_steps=0 , **kwargs ):
    self.optimizer = optimizer
    self.total_num_steps = total_num_steps
    self.warmup_num_steps = warmup_num_steps
    self.kwargs = kwargs
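# Editor's example (hedged: in the real libraries this config wrapper is exposed as
# accelerate's / transformers' HfDeepSpeedConfig; the dict below is illustrative):
#   ds = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
#   ds.is_zero3()    # True
#   ds.is_offload()  # True -- offload_param's device is "cpu", a valid offload target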
| 481
| 0
|
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
lowerCAmelCase : Dict = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
# Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
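    # Illustrative alternative (editorial comment, not from the original script): the
    # same accuracy metric can be computed with numpy only, without `evaluate`.
    # `p` is still an `EvalPrediction`:
    #
    #   def compute_metrics(p: EvalPrediction):
    #       preds = np.argmax(p.predictions, axis=1)
    #       return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}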
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
| 444
|
'''simple docstring'''
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """simple docstring"""
    # Initialise the PyTorch model from the given configuration
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
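# Example invocation (script name and paths are placeholders, shown for illustration only):
#   python convert_t5_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/output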
| 444
| 1
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
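# Illustrative usage of the processor under test (editorial comment, kept commented
# out so the test module stays import-safe; checkpoint name and inputs are placeholders):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a sound of a cat"], audios=floats_list((1, 1000)), return_tensors="pt")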
| 720
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 184
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
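# Note on the pattern above (editorial comment, not from the original file): the
# `_import_structure` dict maps submodules to their exported names, and `_LazyModule`
# replaces this module in `sys.modules`, so a heavy import like the torch-backed
# model class is only resolved on first attribute access, e.g.:
#   from transformers.models.table_transformer import TableTransformerConfig  # cheap; no torch load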
| 529
|
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
# TODO Update this
__lowerCAmelCase = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
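# Illustrative construction (editorial example, not from the original file; values
# chosen only to exercise the defaults defined above):
#   config = EsmConfig(vocab_size=33, is_folding_model=True)  # uses EsmFoldConfig defaults
#   assert config.esmfold_config.trunk.structure_module.num_blocks == 8
#   d = config.to_dict()  # nested folding configs are serialized recursively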
| 358
| 0
|
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a= logging.get_logger(__name__)
a= {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class AlignTextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
return output
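# Illustrative composition (editorial example, not from the original file;
# argument values are placeholders):
#   text_config = AlignTextConfig(vocab_size=30522)
#   vision_config = AlignVisionConfig()
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)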
| 287
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 287
| 1
|
'''simple docstring'''
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    """simple docstring"""
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowerCAmelCase = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowerCAmelCase = flax_model.params["encoder"]["block"][str(_lowerCAmelCase )]["layer"]
lowerCAmelCase = tax_attention_key
lowerCAmelCase = tax_attention_out
lowerCAmelCase = tax_attention_query
lowerCAmelCase = tax_attention_value
lowerCAmelCase = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCAmelCase = tax_global_layer_norm
if split_mlp_wi:
lowerCAmelCase = tax_mlp_wi_a
lowerCAmelCase = tax_mlp_wi_a
else:
lowerCAmelCase = tax_mlp_wi
lowerCAmelCase = tax_mlp_wo
lowerCAmelCase = tax_mlp_layer_norm
lowerCAmelCase = flax_model_encoder_layer_block
# Only for layer 0:
lowerCAmelCase = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowerCAmelCase = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCAmelCase = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowerCAmelCase = tax_encoder_global_rel_embedding
# Assigning
lowerCAmelCase = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowerCAmelCase = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowerCAmelCase = tax_enc_dec_attention_module["key"]["kernel"]
lowerCAmelCase = tax_enc_dec_attention_module["out"]["kernel"]
lowerCAmelCase = tax_enc_dec_attention_module["query"]["kernel"]
lowerCAmelCase = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowerCAmelCase = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowerCAmelCase = flax_model.params["decoder"]["block"][str(_lowerCAmelCase )]["layer"]
lowerCAmelCase = tax_attention_key
lowerCAmelCase = tax_attention_out
lowerCAmelCase = tax_attention_query
lowerCAmelCase = tax_attention_value
lowerCAmelCase = tax_pre_attention_layer_norm
lowerCAmelCase = tax_enc_dec_attention_key
lowerCAmelCase = tax_enc_dec_attention_out
lowerCAmelCase = tax_enc_dec_attention_query
lowerCAmelCase = tax_enc_dec_attention_value
lowerCAmelCase = tax_cross_layer_norm
if split_mlp_wi:
lowerCAmelCase = tax_mlp_wi_a
lowerCAmelCase = tax_mlp_wi_a
else:
lowerCAmelCase = tax_mlp_wi
lowerCAmelCase = tax_mlp_wo
lowerCAmelCase = txa_mlp_layer_norm
lowerCAmelCase = flax_model_decoder_layer_block
# Decoder Normalization
lowerCAmelCase = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
lowerCAmelCase = txa_decoder_norm
# Only for layer 0:
lowerCAmelCase = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowerCAmelCase = tax_decoder_rel_embedding
# Token Embeddings
lowerCAmelCase = tax_model["target"]["token_embedder"]["embedding"]
lowerCAmelCase = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowerCAmelCase = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCAmelCase )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
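# Example invocation (paths and model id are placeholders, shown for illustration only):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /path/to/output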
| 433
|
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
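# Editorial note: the four direction-specific loops above can be collapsed into a
# single pass over direction vectors. A sketch (not part of the original solution):
#   directions = [(0, 1), (1, 0), (1, 1), (1, -1)]  # right, down, diagonal, anti-diagonal
#   best = 0
#   for di, dj in directions:
#       for i in range(20):
#           for j in range(20):
#               if 0 <= i + 3 * di < 20 and 0 <= j + 3 * dj < 20:
#                   p = l[i][j] * l[i + di][j + dj] * l[i + 2 * di][j + 2 * dj] * l[i + 3 * di][j + 3 * dj]
#                   best = max(best, p)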
| 629
| 0
|
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph)


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
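# Illustrative usage (editorial example, not part of the original module):
#   graph_with_cycle = {0: [1], 1: [2], 2: [0]}
#   graph_without_cycle = {0: [1, 2], 1: [2], 2: []}
#   assert check_cycle(graph_with_cycle) is True
#   assert check_cycle(graph_without_cycle) is False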
| 714
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip()] = int(key)
        self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""")
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample
            # perform classifier-free guidance: the batch holds the conditional and the
            # unconditional (null-class) halves; eps = eps_uncond + guidance_scale * (eps_cond - eps_uncond)
            # steers sampling toward the class-conditional prediction
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__lowerCamelCase , __lowerCamelCase : int = torch.split(UpperCAmelCase , UpperCAmelCase , dim=1 )
else:
__lowerCamelCase : int = noise_pred
# compute previous image: x_t -> x_t-1
__lowerCamelCase : Optional[Any] = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
if guidance_scale > 1:
__lowerCamelCase , __lowerCamelCase : List[str] = latent_model_input.chunk(2 , dim=0 )
else:
__lowerCamelCase : Optional[Any] = latent_model_input
__lowerCamelCase : Tuple = 1 / self.vae.config.scaling_factor * latents
__lowerCamelCase : Any = self.vae.decode(UpperCAmelCase ).sample
__lowerCamelCase : Tuple = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowerCamelCase : Optional[int] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCamelCase : Dict = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=UpperCAmelCase )
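# Hedged usage sketch: sampling a class-conditional image with this pipeline.
# The checkpoint id and label below are illustrative assumptions, not
# guaranteed by the code above.
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   class_ids = pipe.get_label_ids(["golden retriever"])
#   image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]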
| 366
| 0
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass
shutil.move(
F"""{directory}/__init__.py""" , F"""{model_dir}/__init__.py""" , )
shutil.move(
F"""{directory}/configuration_{lowercase_model_name}.py""" , F"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_tf_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_flax_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/{lowercase_model_name}.md""" , F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
F"""{directory}/tokenization_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
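# Hedged usage sketch (the testing fixture path is a placeholder): the command
# defined above is exposed through the transformers CLI.
#
#   transformers-cli add-new-model
#   transformers-cli add-new-model --testing --testing_file ./add_new_model_config.json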
| 10
|
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # blank out inconsistent rows where the listed price exceeds the MRP
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Discount",
        ] = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
lowerCamelCase__ = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
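# Worked example of the discount formula above (made-up values): with an MRP of
# ₹1,000 and a current price of ₹750, the discount is
# (1000 - 750) / 1000 * 100 = 25.0 percent.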
| 574
| 0
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 444
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
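# Sanity-check sketch (assumed example data, not from the original file): the
# vectorized distance above expands ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b,
# so it must agree with the naive pairwise computation.
#
#   a = np.random.rand(4, 3)
#   b = np.random.rand(5, 3)
#   naive = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)
#   assert np.allclose(squared_euclidean_distance(a, b), naive)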
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        # rescale pixel values to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_normalize: Optional[bool] = None,
        do_color_quantize: Optional[bool] = None,
        clusters=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
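# Hedged usage sketch: color-quantizing an image into cluster ids, the way
# ImageGPT consumes pixels. The 16 random clusters are placeholders for the
# model's real learned palette.
#
#   from PIL import Image
#   clusters = np.random.rand(16, 3) * 2 - 1  # fake palette in [-1, 1]
#   processor = ImageGPTImageProcessor(clusters=clusters, size={"height": 32, "width": 32})
#   batch = processor(Image.new("RGB", (64, 64)), return_tensors="np")
#   print(batch["input_ids"].shape)  # (1, 1024), i.e. 32 * 32 token ids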
| 444
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 16
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
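# Sketch of what the _LazyModule indirection buys (illustrative, not from the
# original file): submodules stay unimported until first attribute access.
#
#   import transformers.models.fnet as fnet  # cheap, no torch-backed imports yet
#   fnet.FNetModel                           # first access triggers the real import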
| 49
| 0
|
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
| 197
|
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including num, using the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
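# Example run (assumed, for illustration only):
#   prime_sieve(25) -> [2, 3, 5, 7, 11, 13, 17, 19, 23]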
| 197
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 671
|
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")
# Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
# Convert the UNet model
lowerCAmelCase : int = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase : Optional[int] = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase : Union[str, Any] = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase : Optional[int] = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase : Any = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase : Any = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase : str = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase : Dict = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase : Any = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase : Optional[Any] = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase : Union[str, Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase : str = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase : int = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
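# Example invocation (a sketch; the script filename and paths are hypothetical):
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.safetensors --use_safetensors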
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        # Check every possible segment against a brute-force reduce.
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
        test_all_segments()
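    # A few direct example queries (a sketch; indices are 0-based and inclusive,
    # and the expected values assume the updates above have been applied):
    print(min_segment_tree.query(0, 5))  # -> -14
    print(max_segment_tree.query(0, 5))  # -> 7
    print(sum_segment_tree.query(0, 5))  # -> 10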
import numpy as np


class Cell:
    """
    A cell in the world grid: an (x, y) position, the parent cell visited
    before arriving here, and the g, h, f values used by the search.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        # Compare by position so independently created cells can match.
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        # Return the in-bounds neighbours of cell (8-connected grid).
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    # A* search from start to goal; returns the path as a list of positions.
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            if any(c == n for c in _closed):
                continue  # already expanded
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            if any(c == n and c.f < n.f for c in _open):
                continue  # a better candidate for this cell is already queued
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
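# Note: the squared-Euclidean heuristic above can overestimate the true remaining
# cost on this 8-connected grid with unit step costs, so the search behaves more
# like greedy best-first than strictly admissible A*. A Chebyshev distance
# heuristic would be admissible here, e.g.:
#   n.h = max(abs(x2 - x1), abs(y2 - y1))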
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)

        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ))

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        # T5-style layer norm: no bias and no subtraction of the mean.
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    # FiLM layer: predicts a per-feature scale and shift from the conditioning embedding.
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
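# Minimal shape sanity check for the decoder above (a sketch; the
# encodings_and_masks structure is illustrative, and the default 12-layer
# model is heavy to instantiate):
#   decoder = T5FilmDecoder(input_dims=128, targets_length=256, d_model=768)
#   tokens = torch.randn(2, 256, 128)
#   spec = decoder(encodings_and_masks=[(enc, enc_mask)], decoder_input_tokens=tokens,
#                  decoder_noise_time=torch.rand(2))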
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
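# The naive scan above compares the pattern at every alignment, so it runs in
# O(len(s) * len(pattern)) time in the worst case; Rabin-Karp or KMP reduce this.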
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    # Convert a positive integer to its representation in any base from 2 to 36.
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
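# Example: decimal_to_any(255, 16) returns "FF", since ALPHABET_VALUES maps
# remainders 10..35 to the letters "A".."Z".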
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"unc-nlp/lxmert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
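# Typical usage (a sketch; requires the pretrained files from the Hugging Face Hub):
#   tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   encoding = tokenizer("A picture of a cat.")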
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs, )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
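# Typical usage (a sketch; downloading the pretrained files from the Hub is assumed):
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model_inputs = tokenizer("Hello world", return_tensors="pt")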
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
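# The 0.5 probability contour plotted above is the decision boundary:
# sigmoid(x @ theta) == 0.5 exactly when x @ theta == 0, i.e. a straight
# line in the two feature dimensions used here.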
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
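# Example usage (a sketch):
#   linked_list = LinkedList()
#   for value in (1, 2, 3):
#       linked_list.insert(value)
#   print(linked_list)  # -> 1 2 3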
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )

    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
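# The traversal visits each vertex once and scans each adjacency list once,
# so depth_first_search runs in O(V + E) time and O(V) extra space.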
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
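# In the datasets library this formatter is what backs torch output formatting (a sketch):
#   ds = ds.with_format("torch")
#   ds[0]  # numeric columns now come back as torch.Tensor values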
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    # Rabin-Karp: compare rolling hashes first, full strings only on a hash match.
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
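# Rolling hash used above, written out: when the window slides one character,
#   text_hash = ((text_hash - ord(text[i]) * modulus_power) * alphabet_size
#                + ord(text[i + p_len])) % modulus
# where modulus_power caches alphabet_size ** (p_len - 1) % modulus, so each
# window update is O(1) instead of rehashing the whole window.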
import math


def perfect_square(num: int) -> bool:
    # Check if a number is a perfect square via math.sqrt.
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    # Check if a number is a perfect square using binary search;
    # integer-only arithmetic avoids floating-point rounding issues.
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
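# Example behaviour of the two checks above:
#   perfect_square(16) -> True, perfect_square(17) -> False
# The binary-search variant uses only integer arithmetic, so it avoids the
# floating-point rounding that math.sqrt can introduce for very large inputs.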
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class UpperCAmelCase ( _snake_case ):
UpperCAmelCase = "autoformer"
UpperCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : List[str] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : str = "student_t" , __lowerCamelCase : str = "nll" , __lowerCamelCase : int = 1 , __lowerCamelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowerCamelCase : bool = True , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : int = 6_4 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 3_2 , __lowerCamelCase : int = 3_2 , __lowerCamelCase : str = "gelu" , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : int = 1_0_0 , __lowerCamelCase : float = 0.02 , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int = 1_0 , __lowerCamelCase : int = 2_5 , __lowerCamelCase : int = 3 , **__lowerCamelCase : int , ):
# time series specific configuration
UpperCAmelCase__ :Any = prediction_length
UpperCAmelCase__ :Optional[int] = context_length if context_length is not None else prediction_length
UpperCAmelCase__ :int = distribution_output
UpperCAmelCase__ :str = loss
UpperCAmelCase__ :Tuple = input_size
UpperCAmelCase__ :Optional[int] = num_time_features
UpperCAmelCase__ :List[Any] = lags_sequence
UpperCAmelCase__ :Any = scaling
UpperCAmelCase__ :Union[str, Any] = num_dynamic_real_features
UpperCAmelCase__ :Dict = num_static_real_features
UpperCAmelCase__ :Optional[int] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(__lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
UpperCAmelCase__ :Any = cardinality
else:
UpperCAmelCase__ :Dict = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(__lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
UpperCAmelCase__ :List[str] = embedding_dimension
else:
UpperCAmelCase__ :int = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase__ :Optional[Any] = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase__ :Optional[Any] = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase__ :Optional[Any] = d_model
UpperCAmelCase__ :Any = encoder_attention_heads
UpperCAmelCase__ :Any = decoder_attention_heads
UpperCAmelCase__ :int = encoder_ffn_dim
UpperCAmelCase__ :Union[str, Any] = decoder_ffn_dim
UpperCAmelCase__ :Optional[Any] = encoder_layers
UpperCAmelCase__ :str = decoder_layers
UpperCAmelCase__ :Optional[Any] = dropout
UpperCAmelCase__ :str = attention_dropout
UpperCAmelCase__ :int = activation_dropout
UpperCAmelCase__ :Optional[Any] = encoder_layerdrop
UpperCAmelCase__ :Dict = decoder_layerdrop
UpperCAmelCase__ :Union[str, Any] = activation_function
UpperCAmelCase__ :List[Any] = init_std
UpperCAmelCase__ :List[str] = use_cache
# Autoformer
UpperCAmelCase__ :List[str] = label_length
UpperCAmelCase__ :Dict = moving_average
UpperCAmelCase__ :str = autocorrelation_factor
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def __SCREAMING_SNAKE_CASE ( self : int ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
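# --- Added example (not from the original file): a minimal sketch of
# instantiating the config above with explicit time-series settings.
if __name__ == "__main__":
    config = AutoformerConfig(prediction_length=24, context_length=48, num_time_features=2)
    # feature_size = input_size * len(lags_sequence) + embedding/real/time/scale features
    print(config.feature_size)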
| 467
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a doubly linked ring of fixed capacity."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
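# --- Added example (not from the original file): a basic enqueue/dequeue
# round trip on the circular queue defined above.
queue = CircularQueueLinkedList(initial_capacity=3)
queue.enqueue("a")
queue.enqueue("b")
assert queue.first() == "a"
assert queue.dequeue() == "a"
assert queue.dequeue() == "b"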
| 555
|
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
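# --- Added example (not from the original file): this module follows the
# torch.hub entry-point (hubconf.py) convention; a typical use, assuming the
# repo is published on GitHub as <user>/<repo> (placeholders, not real names):
#
#   import torch
#   tok = torch.hub.load("<user>/<repo>", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("<user>/<repo>", "model", "bert-base-uncased")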
| 555
| 1
|
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    """Build a list of Things from parallel name/value/weight lists."""
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Greedily pick items (ordered by key_func, descending) while the total
    weight stays within max_cost; returns (chosen items, total value)."""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy() -> None:
    """Greedy-by-value on a small menu stays within the cost budget."""
    menu = build_menu(["a", "b", "c"], [60, 100, 120], [10, 20, 30])
    chosen, total_value = greedy(menu, 50, Things.get_value)
    assert total_value == 220  # picks c then b; adding a would exceed the budget
if __name__ == "__main__":
import doctest
doctest.testmod()
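# --- Added example (not from the original file): comparing value-greedy and
# density-greedy selections on the same menu.
menu = build_menu(["x", "y", "z"], [100, 60, 60], [50, 10, 10])
by_value, v1 = greedy(menu, 50, Things.get_value)       # takes x only -> 100
by_density, v2 = greedy(menu, 50, Things.value_weight)  # takes y and z; x no longer fits -> 120
assert (v1, v2) == (100.0, 120.0)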
| 262
|
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(
    force: float, charge1: float, charge2: float, distance: float
) -> dict[str, float]:
    """Solve Coulomb's law F = k * q1 * q2 / d^2 for whichever quantity is
    passed as 0 (exactly one of the four arguments must be 0)."""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
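# --- Added example (not from the original file): two 1 C charges 1 m apart
# should feel a force equal to Coulomb's constant.
result = coulombs_law(force=0, charge1=1, charge2=1, distance=1)
assert result == {"force": 8.988e9}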
| 262
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original AST weights into the 🤗 model structure."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
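# --- Added example (not from the original file): typical command-line usage of
# this conversion script. The script file name below is an assumption; invoke
# whatever file this code actually lives in.
#
#   python convert_audio_spectrogram_transformer_checkpoint.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted \
#       --push_to_hub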
| 700
|
"""simple docstring"""
def solution(limit: int = 50_000_000) -> int:
    """Project Euler 87: count numbers below `limit` expressible as a prime
    square plus a prime cube plus a prime fourth power."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # sieve of Eratosthenes up to prime_square_limit
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # iterate in ascending order so the early `break`s below are sound
    primes_sorted = sorted(primes)
    for prime1 in primes_sorted:
        square = prime1 * prime1
        for prime2 in primes_sorted:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:  # 16 = 2**4, the smallest fourth power
                break
            for prime3 in primes_sorted:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(F"""{solution() = }""")
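# --- Added example (not from the original file): per the problem statement,
# exactly four such numbers exist below fifty: 28, 33, 47 and 49.
assert solution(50) == 4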
| 20
| 0
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    r"""Cross-attention 2D downsampling block: pairs of resnet + transformer
    layers, followed by an optional downsampler."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    r"""Plain 2D downsampling block: resnet layers only."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    r"""Cross-attention 2D upsampling block; concatenates skip connections from
    the down path before each resnet."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUpBlock2D(nn.Module):
    r"""Plain 2D upsampling block with skip connections."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    r"""Middle block: a resnet followed by (attention + resnet) * num_layers."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
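# --- Added example (not from the original file): a minimal shape sketch,
# assuming the sibling resnet/attention modules are importable. Flax modules
# here take NHWC tensors; temb is a per-sample time-embedding vector whose
# width (128 below) is an illustrative choice.
#
#   import jax
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64)
#   params = block.init(
#       jax.random.PRNGKey(0),
#       jnp.zeros((1, 16, 16, 32)),  # hidden_states (NHWC)
#       jnp.zeros((1, 128)),         # temb
#   )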
| 496
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):  # original model-specific class name was lost in extraction
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
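# --- Added example (not from the original file): preprocessing a dummy image.
# `ImageProcessor` is the reconstructed class name used above.
if __name__ == "__main__":
    image = (np.random.rand(300, 300, 3) * 255).astype("uint8")
    processor = ImageProcessor()
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224): resized to 256, center-cropped to 224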
| 283
| 0
|
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
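# --- Added example (not from the original file): 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26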
| 7
|
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Interpret an environment variable as a boolean flag."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    """Return the raw string value of an environment variable, with a default."""
    value = os.environ.get(key, str(default))
    return value
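# --- Added example (not from the original file): reading typical distributed
# training settings from the environment.
os.environ["WORLD_SIZE"] = "4"
assert get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], default=1) == 4
assert parse_flag_from_env("SOME_UNSET_FLAG", default=False) is False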
| 7
| 1
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    """Arguments used by the benchmark scripts."""

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self) -> bool:
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
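# --- Added example (not from the original file): constructing the (deprecated)
# benchmark arguments programmatically; emits the FutureWarning defined above.
args = BenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8])
print(args.model_names)  # ['bert-base-uncased']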
| 117
|
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 91
| 0
|
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 718
|
"""simple docstring"""
# The original constant names were lost in extraction; distinct placeholder
# names below keep each timestep schedule addressable.
CUSTOM_TIMESTEPS_1 = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
CUSTOM_TIMESTEPS_2 = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
CUSTOM_TIMESTEPS_3 = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
CUSTOM_TIMESTEPS_4 = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
CUSTOM_TIMESTEPS_5 = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
CUSTOM_TIMESTEPS_6 = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
CUSTOM_TIMESTEPS_7 = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
CUSTOM_TIMESTEPS_8 = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 668
| 0
|
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)


class SVC:
    """Support vector classifier solved via Wolfe's dual with scipy.optimize."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
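# --- Added example (not from the original file): fitting a linear SVC on two
# trivially separable points and classifying new ones. A small toy problem like
# this should converge, though scipy's optimizer gives no hard guarantee.
xs = [np.array([1.0, 1.0]), np.array([-1.0, -1.0])]
ys = np.array([1, -1])
svc = SVC(kernel="linear", regularization=10)
svc.fit(xs, ys)
assert svc.predict(np.array([2.0, 2.0])) == 1
assert svc.predict(np.array([-2.0, -2.0])) == -1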
| 49
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=self.path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 170
| 0
|
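# Hedged usage note: the reader above is normally reached through the public
# `datasets` API rather than instantiated directly. The file name below is a
# placeholder.
from datasets import load_dataset

dataset = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
print(dataset[0]["text"])  # one example per line of the input file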
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
_lowercase : Any = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["""projector.weight"""]
    model.projector.bias.data = downstream_dict["""projector.bias"""]
    model.classifier.weight.data = downstream_dict["""model.post_net.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.post_net.linear.bias"""]
    return model
def convert_diarization( base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["""model.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.linear.bias"""]
    return model
def convert_xvector( base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = WavaVecaForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["""connector.weight"""]
    model.projector.bias.data = downstream_dict["""connector.bias"""]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
    model.feature_extractor.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
    model.feature_extractor.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
    model.classifier.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
    model.classifier.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
    model.objective.weight.data = downstream_dict["""objective.W"""]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint( base_model_name , config_path , checkpoint_path , model_dump_path ):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    downstream_dict = checkpoint["""Downstream"""]
    hf_config = WavaVecaConfig.from_pretrained(config_path )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("""ForSequenceClassification""" ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("""ForAudioFrameClassification""" ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("""ForXVector""" ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["""Featurizer"""]["""weights"""]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 546
|
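# Hedged usage sketch: once the conversion script above has written
# `model_dump_path`, the checkpoint loads through the standard transformers
# classes. The local path is a placeholder.
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification

model = Wav2Vec2ForSequenceClassification.from_pretrained("./converted_s3prl_model")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./converted_s3prl_model")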
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration( func ):
    """simple docstring"""
    def wrapper(*args , **kwargs ):
        starttime = timeit.default_timer()
        _ = func(*args , **kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples( features: dict , num_examples=1_00 , seq_shapes=None ):
    """simple docstring"""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v , _ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v , datasets.Value ):
                if v.dtype == "string":
                    data = """The small grey turtle was surprisingly fast when challenged."""
                else:
                    data = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            elif isinstance(v , datasets.Sequence ):
                while isinstance(v , datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset( dataset_path , features , num_examples=1_00 , seq_shapes=None ):
    """simple docstring"""
    dummy_data = generate_examples(features , num_examples=num_examples , seq_shapes=seq_shapes )
    with ArrowWriter(features=features , path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples , num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
    dataset = datasets.Dataset.from_file(filename=dataset_path , info=datasets.DatasetInfo(features=features ) )
    return dataset
| 546
| 1
|
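# Minimal sketch of how the helpers above combine; the feature spec and the
# output path are made up for illustration.
import datasets

features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)
print(dataset)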
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 92
|
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '''\
Text data.
Second line of data.'''
FILE_PATH = '''file'''
@pytest.fixture(scope="session" )
def zstd_path( tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT , "utf-8" )
    with zstd.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file( tmpfs ):
    '''simple docstring'''
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def test_cached_path_extract( compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    '''simple docstring'''
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def test_extracted_datasets_path( default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    '''simple docstring'''
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local( text_file ):
    '''simple docstring'''
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local( tmp_path ):
    '''simple docstring'''
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec( tmpfs_file ):
    '''simple docstring'''
    output_file = get_from_cache(F"""tmp://{tmpfs_file}""" )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_cached_path_offline():
    '''simple docstring'''
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_http_offline( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_ftp_offline( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_fsspec_offline( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
| 646
| 0
|
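# For reference, the behaviour exercised by the tests above: `cached_path`
# resolves local paths as-is, downloads URLs into the cache, and can
# transparently extract archives. A hedged sketch (the URL is a placeholder):
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path

download_config = DownloadConfig(cache_dir="./hf_cache", extract_compressed_file=True)
local_path = cached_path("https://example.com/data.txt.gz", download_config=download_config)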
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , lowercase_ , )
class UpperCAmelCase ( lowercase_):
"""simple docstring"""
lowerCAmelCase_ = RobertaConfig
lowerCAmelCase_ = """roberta"""
def __init__( self : Dict , UpperCamelCase__ : List[str] ) -> List[Any]:
super().__init__(UpperCamelCase__ )
_UpperCamelCase =RobertaEmbeddings(UpperCamelCase__ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , lowercase_ , )
class UpperCAmelCase ( lowercase_):
"""simple docstring"""
lowerCAmelCase_ = RobertaConfig
lowerCAmelCase_ = """roberta"""
def __init__( self : Tuple , UpperCamelCase__ : Any ) -> Dict:
super().__init__(UpperCamelCase__ )
_UpperCamelCase =config.num_labels
_UpperCamelCase =config.num_hidden_layers
_UpperCamelCase =DeeRobertaModel(UpperCamelCase__ )
_UpperCamelCase =nn.Dropout(config.hidden_dropout_prob )
_UpperCamelCase =nn.Linear(config.hidden_size , self.config.num_labels )
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , output_layer=-1 , train_highway=False , ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(highway_loss )
            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 713
|
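# Background sketch: the early-exit decision above hinges on the entropy of a
# highway classifier's logits. A minimal standalone version of that criterion
# (the threshold value is illustrative only):
import torch

def logits_entropy(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs)).sum(dim=-1)

logits = torch.tensor([[4.0, -2.0, -2.0]])  # a confident prediction
if logits_entropy(logits).item() < 0.3:     # low entropy -> exit at this layer
    print("early exit")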
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class LEDTokenizer ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ):
        return len(self.encoder )

    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )

    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word

    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens

    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )

    def convert_tokens_to_string( self , tokens ):
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)

    def _pad( self , encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding] , max_length: Optional[int] = None , padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask'''] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 271
| 0
|
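# Illustration of the byte-level BPE plumbing above: `bytes_to_unicode` maps
# every byte to a printable character, and `get_pairs` enumerates the symbol
# bigrams that the merge ranking is applied to. Standalone demo:
word = tuple("hello")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
print(pairs)  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}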
"""simple docstring"""
import math
def res( x: int , y: int ) -> float:
    if 0 not in (x, y):
        # We use the identity log10(x**y) = y * log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("""This should never happen""" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = 'Enter the base and the power separated by a comma: '
    xa, ya = map(int, input(prompt).split(','))
    xb, yb = map(int, input(prompt).split(','))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print('Largest number is', xa, '^', ya)
    elif resb > resa:
        print('Largest number is', xb, '^', yb)
    else:
        print('Both are equal')
| 213
|
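# The identity used above: log10(x**y) == y * log10(x), so large powers can be
# compared without materialising them. Quick sanity check:
import math

assert math.isclose(math.log10(2**10), 10 * math.log10(2))
print(3 * math.log10(5) > 5 * math.log10(3))  # False: 5**3 = 125 < 3**5 = 243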
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Tuple = logging.get_logger(__name__)
_a : Optional[int] = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class XLMConfig ( PretrainedConfig ):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__( self , vocab_size=30145 , emb_dim=2048 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=512 , embed_init_std=2048**-0.5 , layer_norm_eps=1e-12 , init_std=0.0_2 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["""n_words"""]
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig ( OnnxConfig ):
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 213
| 1
|
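# Hedged usage sketch of the config above via the public transformers API;
# note how `attribute_map` redirects the generic names:
from transformers import XLMConfig

config = XLMConfig(emb_dim=1024, n_layers=6, n_heads=8)
print(config.hidden_size)  # 1024 -- attribute_map resolves hidden_size -> emb_dim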
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_lowerCamelCase = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
@classmethod
def _snake_case ( cls :Dict ) -> Optional[Any]:
"""simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def _snake_case ( cls :str ) -> List[Any]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def _snake_case ( self :List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
SCREAMING_SNAKE_CASE__ = FlaxBertModel(__A )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
SCREAMING_SNAKE_CASE__ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__A , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__A , repo_id="""test-model-flax""" , push_to_hub=__A , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
SCREAMING_SNAKE_CASE__ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__A , 1E-3 , msg=f'''{key} not identical''' )
def _snake_case ( self :Optional[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
SCREAMING_SNAKE_CASE__ = FlaxBertModel(__A )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
SCREAMING_SNAKE_CASE__ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__A , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__A , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=__A , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
SCREAMING_SNAKE_CASE__ = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__A , 1E-3 , msg=f'''{key} not identical''' )
def check_models_equal( modela , modelb ):
    models_are_equal = True
    flat_params_a = flatten_dict(modela.params )
    flat_params_b = flatten_dict(modelb.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class UpperCamelCase_ ( unittest.TestCase ):
    def test_model_from_pretrained_subfolder( self ):
        """simple docstring"""
        config = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
        model = FlaxBertModel(config )
        subfolder = """bert"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) )
            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )
    def test_model_from_pretrained_subfolder_sharded( self ):
        """simple docstring"""
        config = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
        model = FlaxBertModel(config )
        subfolder = """bert"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) , max_shard_size="""10KB""" )
            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )
    def test_model_from_pretrained_hub_subfolder( self ):
        """simple docstring"""
        subfolder = """bert"""
        model_id = """hf-internal-testing/tiny-random-bert-subfolder"""
        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )
    def test_model_from_pretrained_hub_subfolder_sharded( self ):
        """simple docstring"""
        subfolder = """bert"""
        model_id = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )
| 707
|
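# The comparison idiom used throughout the tests above, in isolation: flatten
# the (possibly frozen) parameter pytree and diff it leaf by leaf.
import numpy as np
from flax.core.frozen_dict import freeze, unfreeze
from flax.traverse_util import flatten_dict

params = freeze({"dense": {"kernel": np.ones((2, 2)), "bias": np.zeros(2)}})
flat = flatten_dict(unfreeze(params))
for key, leaf in flat.items():
    print(key, leaf.shape)  # ('dense', 'bias') (2,) / ('dense', 'kernel') (2, 2)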
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '\\n Text data.\n Second line of data.'
FILE_PATH = 'file'
@pytest.fixture(scope="""session""" )
def zstd_path( tmp_path_factory ):
    path = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
    data = bytes(FILE_CONTENT , """utf-8""" )
    with zstd.open(path , """wb""" ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file( tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , """w""" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Dict , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
SCREAMING_SNAKE_CASE__ = input_paths[compression_format]
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = DownloadConfig(cache_dir=UpperCamelCase__ , extract_compressed_file=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = cached_path(UpperCamelCase__ , download_config=UpperCamelCase__ )
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = """custom_cache"""
SCREAMING_SNAKE_CASE__ = """custom_extracted_dir"""
SCREAMING_SNAKE_CASE__ = tmp_path / """custom_extracted_path"""
if default_extracted:
SCREAMING_SNAKE_CASE__ = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , UpperCamelCase__ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
SCREAMING_SNAKE_CASE__ = xz_file
SCREAMING_SNAKE_CASE__ = (
DownloadConfig(extract_compressed_file=UpperCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=UpperCamelCase__ )
)
SCREAMING_SNAKE_CASE__ = cached_path(UpperCamelCase__ , download_config=UpperCamelCase__ )
assert Path(UpperCamelCase__ ).parent.parts[-2:] == expected
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] ):
# absolute path
SCREAMING_SNAKE_CASE__ = str(Path(UpperCamelCase__ ).resolve() )
assert cached_path(UpperCamelCase__ ) == text_file
# relative path
SCREAMING_SNAKE_CASE__ = str(Path(UpperCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(UpperCamelCase__ ) == text_file
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
# absolute path
SCREAMING_SNAKE_CASE__ = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(UpperCamelCase__ ):
cached_path(UpperCamelCase__ )
# relative path
SCREAMING_SNAKE_CASE__ = """./__missing_file__.txt"""
with pytest.raises(UpperCamelCase__ ):
cached_path(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
SCREAMING_SNAKE_CASE__ = get_from_cache(f'''tmp://{tmpfs_file}''' )
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( ):
with pytest.raises(UpperCamelCase__ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(UpperCamelCase__ ):
http_get("""https://huggingface.co""" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(UpperCamelCase__ ):
ftp_get("""ftp://huggingface.co""" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(UpperCamelCase__ ):
fsspec_get("""s3://huggingface.co""" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
fsspec_head("""s3://huggingface.co""" )
| 59
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix( postfix_notation : list ):
'''simple docstring'''
if not postfix_notation:
return 0
UpperCAmelCase__ : int = {"+", "-", "*", "/"}
UpperCAmelCase__ : list[Any] = []
for token in postfix_notation:
if token in operations:
UpperCAmelCase__ : Optional[int] = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
            stack.append(int(token ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 614
|
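# Worked example for the evaluator above: "2 3 + 4 *" in postfix is
# (2 + 3) * 4 = 20, and the floor-division branch keeps the usual
# truncate-toward-zero semantics for negative operands.
print(evaluate_postfix(["2", "3", "+", "4", "*"]))  # 20
print(evaluate_postfix(["-7", "2", "/"]))           # -3, not -4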
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation :
    def __init__( self , text: str = None , conversation_id: uuid.UUID = None , past_user_inputs=None , generated_responses=None ) -> None:
        '''simple docstring'''
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
def __eq__( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input( self , text: str , overwrite: bool = False ):
        '''simple docstring'''
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    F'with: "{text}".' )
                self.new_user_input = text
            else:
                logger.warning(
                    F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
        else:
            self.new_user_input = text

    def mark_processed( self ):
        '''simple docstring'''
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None

    def append_response( self , response: str ):
        '''simple docstring'''
        self.generated_responses.append(response )

    def iter_texts( self ):
        '''simple docstring'''
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__( self ) -> str:
        '''simple docstring'''
        output = F'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class ConversationalPipeline ( Pipeline ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters( self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , conversations , num_workers=0 , **kwargs ):
        '''simple docstring'''
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs

    def preprocess( self , conversation , min_length_for_response=32 ) -> Dict[str, Any]:
        '''simple docstring'''
        if not isinstance(conversation , Conversation ):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
        if conversation.new_user_input is None:
            raise ValueError(
                F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method" )
        if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward( self , model_inputs , minimum_tokens=10 , **generate_kwargs ):
        '''simple docstring'''
        max_length = generate_kwargs.get("max_length" , self.model.config.max_length )
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation" )
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess( self , model_outputs , clean_up_tokenization_spaces=True ):
        '''simple docstring'''
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation

    def _legacy_parse_and_tokenize( self , conversation ):
        '''simple docstring'''
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 178
| 0
|
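# Hedged usage sketch for the pipeline above via the public API (available in
# the transformers versions this module ships with; the prompt is arbitrary):
from transformers import Conversation, pipeline

chatbot = pipeline("conversational")
conversation = Conversation("What is the capital of France?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])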
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor ( ProcessorMixin ):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                """ include a `tokenizer_class` attribute is deprecated and will be """
                """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
                """ attribute to either your `config.json` or `tokenizer_config.json` """
                """file to suppress this warning: """ , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            audio = kwargs.pop("""raw_speech""" )
        else:
            audio = kwargs.pop("""audio""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def pad( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop("""input_features""" , None )
        labels = kwargs.pop("""labels""" , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["""labels"""] = labels["""input_ids"""]
            return input_features
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 704
|
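# Hedged usage sketch: the processor bundles the feature extractor and the
# tokenizer behind one __call__. The checkpoint name is a common public one,
# used here purely for illustration.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz
inputs = processor(audio=speech, sampling_rate=16000, return_tensors="pt")
print(inputs.input_values.shape)  # torch.Size([1, 16000])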
from __future__ import annotations
class lowercase :
    def __init__( self , key: int = 0 ) -> None:
        self.__key = key

    def encrypt( self , content: str , key: int ) -> list[str]:
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]

    def decrypt( self , content: list[str] , key: int ) -> list[str]:
        assert isinstance(key , int ) and isinstance(content , list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]

    def encrypt_string( self , content: str , key: int = 0 ) -> str:
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def decrypt_string( self , content: str , key: int = 0 ) -> str:
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def encrypt_file( self , file: str , key: int = 0 ) -> bool:
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True

    def decrypt_file( self , file: str , key: int ) -> bool:
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
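# A minimal usage sketch (the feature spec and output path below are assumed
# for illustration; any `datasets.Features` works):
# import datasets
# features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
# dataset = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=10)
# print(len(dataset))  # 10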
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Solve the maximum subarray sum problem using Kadane's algorithm.

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    >>> max_subarray_sum([-1, -2, -3])
    -1
    """
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
def get_set_bits_count(number: int) -> int:
    """Count the set bits of a non-negative integer using Brian Kernighan's algorithm.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
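# Added note on why `number &= number - 1` works: subtracting 1 flips the
# lowest set bit and every bit below it, so the AND clears exactly one set bit
# per iteration. Starting from 12 (0b1100):
# 12 & 11 == 8   (0b1100 & 0b1011 -> 0b1000, one set bit removed)
# 8 & 7 == 0     (0b1000 & 0b0111 -> 0b0000, loop ends after two steps)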
import numpy as np


class Cell:
    """One square of the grid, holding its position, parent and the A* costs g, h and f."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds neighbours of a cell in all eight directions."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # skip neighbours that were already expanded
            if n in _closed:
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # skip if a cheaper copy of this cell is already queued
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
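# Added remark on the heuristic: `astar` scores cells with the *squared*
# Euclidean distance, (y2 - y1) ** 2 + (x2 - x1) ** 2. Squaring keeps the math
# integer-only but overestimates the true remaining distance, so the search
# behaves greedily rather than as strictly optimal A*. A conventional
# admissible variant would take the square root instead:
# n.h = ((y2 - y1) ** 2 + (x2 - x1) ** 2) ** 0.5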
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices)

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim])

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized")


@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
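# A small illustrative sketch (values shown are the defaults above, not an
# assumption about any particular checkpoint): instantiating `GitConfig` with
# no arguments also builds a nested vision config.
# config = GitConfig()
# print(config.model_type)                # "git"
# print(config.vision_config.model_type)  # "git_vision_model"
# print(config.to_dict()["vision_config"]["hidden_size"])  # 768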
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    pass
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
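# Illustrative invocation (the launcher file name and the training script are
# assumptions; the launched script must define a top-level `_mp_fn`):
# python xla_spawn.py --num_cores 8 your_training_script.py --learning_rate 3e-5
# The launcher imports `your_training_script` as a module and spawns `_mp_fn`
# once per TPU core, forwarding the remaining arguments via sys.argv.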
"""simple docstring"""
from __future__ import annotations
import time
A : List[str] = list[tuple[int, int]]
A : Tuple = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A : Union[str, Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Node | None ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ = pos_x
UpperCamelCase__ = pos_y
UpperCamelCase__ = (pos_y, pos_x)
UpperCamelCase__ = goal_x
UpperCamelCase__ = goal_y
UpperCamelCase__ = parent
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :int , lowerCamelCase_ :tuple[int, int] , lowerCamelCase_ :tuple[int, int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCamelCase_ )
UpperCamelCase__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCamelCase_ )
UpperCamelCase__ = [self.start]
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Any ) -> Path | None:
"""simple docstring"""
while self.node_queue:
UpperCamelCase__ = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
UpperCamelCase__ = True
return self.retrace_path(lowerCamelCase_ )
UpperCamelCase__ = self.get_successors(lowerCamelCase_ )
for node in successors:
self.node_queue.append(lowerCamelCase_ )
if not self.reached:
return [self.start.pos]
return None
def lowerCamelCase__ ( self :str , lowerCamelCase_ :Node ) -> list[Node]:
"""simple docstring"""
UpperCamelCase__ = []
for action in delta:
UpperCamelCase__ = parent.pos_x + action[1]
UpperCamelCase__ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , lowerCamelCase_ ) )
return successors
def lowerCamelCase__ ( self :Any , lowerCamelCase_ :Node | None ) -> Path:
"""simple docstring"""
UpperCamelCase__ = node
UpperCamelCase__ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCamelCase__ = current_node.parent
path.reverse()
return path
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Union[str, Any] ) -> int:
"""simple docstring"""
UpperCamelCase__ = BreadthFirstSearch(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = BreadthFirstSearch(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = False
def lowerCamelCase__ ( self :int ) -> Path | None:
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
UpperCamelCase__ = self.fwd_bfs.node_queue.pop(0 )
UpperCamelCase__ = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
UpperCamelCase__ = True
return self.retrace_bidirectional_path(
lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase__ = current_bwd_node
UpperCamelCase__ = current_fwd_node
UpperCamelCase__ = {
self.fwd_bfs: self.fwd_bfs.get_successors(lowerCamelCase_ ),
self.bwd_bfs: self.bwd_bfs.get_successors(lowerCamelCase_ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(lowerCamelCase_ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowerCamelCase__ ( self :List[str] , lowerCamelCase_ :Node , lowerCamelCase_ :Node ) -> Path:
"""simple docstring"""
UpperCamelCase__ = self.fwd_bfs.retrace_path(lowerCamelCase_ )
UpperCamelCase__ = self.bwd_bfs.retrace_path(lowerCamelCase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCamelCase__ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
A : str = (0, 0)
A : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
A : Any = time.time()
A : Optional[int] = BreadthFirstSearch(init, goal)
A : List[str] = bfs.search()
A : Dict = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
A : Optional[int] = time.time()
A : Any = BidirectionalBreadthFirstSearch(init, goal)
A : List[Any] = bd_bfs.search()
A : Dict = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
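# A minimal usage sketch for the re-exported pipeline (hedged: the checkpoint
# name is the one published by SHI-Labs, and loading it needs the optional
# torch/transformers dependencies guarded above):
# import torch
# from diffusers import VersatileDiffusionPipeline
# pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
# image = pipe.text_to_image("an astronaut riding a horse").images[0]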
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
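# Illustrative launch commands (the file name is an assumption; adjust it to
# wherever this script lives in your project):
# accelerate launch local_sgd.py --local_sgd_steps 8
# accelerate launch local_sgd.py --mixed_precision fp16 --gradient_accumulation_steps 2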
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase__ :
def __init__( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : str=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=False , UpperCamelCase__ : str=True , UpperCamelCase__ : Union[str, Any]=99 , UpperCamelCase__ : Dict=32 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : int=None , ):
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , )
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase_ = OpenLlamaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
lowercase_ = True
lowercase_ = OpenLlamaModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , ):
'''simple docstring'''
lowercase_ = True
lowercase_ = True
lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# first forward pass
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
lowercase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
# select random slice
lowercase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE : List[str] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : List[Any] = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = OpenLlamaModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
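    # Note on the two strategies exercised above (explanatory comment, not from the
    # original file): "linear" RoPE scaling rescales the rotary position indices at
    # every sequence length, so even the short input diverges from the unscaled
    # model, while "dynamic" NTK scaling only activates past the original
    # max_position_embeddings -- which is why the short outputs are expected to
    # match in that branch.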
| 650
| 1
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
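
# Illustrative sketch (not part of the original CLI): a hypothetical extra command
# would plug into the subparser built in main() by implementing the same interface
# the registered commands share -- a static `register_subcommand(parser)` plus an
# instance-level `run()`:
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser):
#             hello_parser = parser.add_parser("hello", help="Toy example command")
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("Hello from transformers-cli!")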
if __name__ == "__main__":
main()
| 668
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, initializer_std=0.02, num_labels=3, num_choices=4, scope=None, base=False, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 668
| 1
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
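
# Minimal usage sketch (illustrative; assumes the default config/checkpoint paths
# above exist and `image` is a [1, 3, H, W] float tensor scaled to [-1, 1]):
#
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     vqgan = load_vqgan(device)
#     reconstruction = reconstruct_with_vqgan(image.to(device), vqgan)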
| 661
|
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
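
# For context (illustrative): the lazy-init layout `parse_init` walks looks like
#
#     _import_structure = {"configuration_foo": ["FooConfig"]}
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         _import_structure["modeling_foo"] = ["FooModel"]
#
#     if TYPE_CHECKING:
#         from .configuration_foo import FooConfig
#
# and the function returns the objects found in each half, keyed by backend.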
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
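
# Shape of the report (illustrative): when the two halves of an init disagree for
# a backend, `analyze_results` returns strings such as
#     "Differences for torch backend:"
#     "  FooModel in _import_structure but not in TYPE_HINT."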
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 661
| 1
|
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program.")
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
| 413
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image, "anime turtle", num_inference_steps=2, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 413
| 1
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
_snake_case : Any = {"UserAgent": UserAgent().random}
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Tuple = script.contents[0]
__snake_case : List[str] = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class a :
"""simple docstring"""
def __init__( self : Any , lowerCamelCase : str ) -> Tuple:
__snake_case : Union[str, Any] = F'https://www.instagram.com/{username}/'
__snake_case : Dict = self.get_json()
def __snake_case ( self : List[Any] ) -> dict:
__snake_case : Optional[Any] = requests.get(self.url , headers=lowerCamelCase ).text
__snake_case : Tuple = BeautifulSoup(lowerCamelCase , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Tuple ) -> str:
return F'{self.__class__.__name__}(\'{self.username}\')'
def __str__( self : List[str] ) -> str:
return F'{self.fullname} ({self.username}) is {self.biography}'
@property
def __snake_case ( self : Optional[int] ) -> str:
return self.user_data["username"]
@property
def __snake_case ( self : Optional[Any] ) -> str:
return self.user_data["full_name"]
@property
def __snake_case ( self : Union[str, Any] ) -> str:
return self.user_data["biography"]
@property
def __snake_case ( self : Union[str, Any] ) -> str:
return self.user_data["business_email"]
@property
def __snake_case ( self : List[Any] ) -> str:
return self.user_data["external_url"]
@property
def __snake_case ( self : Optional[Any] ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def __snake_case ( self : Dict ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def __snake_case ( self : List[Any] ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __snake_case ( self : List[str] ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def __snake_case ( self : Tuple ) -> bool:
return self.user_data["is_verified"]
@property
def __snake_case ( self : Union[str, Any] ) -> bool:
return self.user_data["is_private"]
def lowerCAmelCase_ ( __lowerCamelCase = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
__snake_case : List[str] = InstagramUser(__lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case : Dict = InstagramUser("github")
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 715
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case : Dict = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
def __snake_case ( self : str ) -> str:
__snake_case : Dict = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
__snake_case : Optional[Any] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_04}] )
__snake_case : Any = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}] )
__snake_case : int = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
] , )
__snake_case : List[str] = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_04}] )
# Legacy behavior
__snake_case : Dict = text_classifier("This is great !" , return_all_scores=lowerCamelCase )
self.assertEqual(nested_simplify(lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_04}] )
__snake_case : Any = text_classifier("This is great !" , return_all_scores=lowerCamelCase )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}]] )
__snake_case : Tuple = text_classifier(["This is great !", "Something else"] , return_all_scores=lowerCamelCase )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
] , )
__snake_case : Tuple = text_classifier(["This is great !", "Something else"] , return_all_scores=lowerCamelCase )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
{"label": "LABEL_0", "score": 0.5_04},
{"label": "LABEL_0", "score": 0.5_04},
] , )
@require_torch
def __snake_case ( self : Optional[int] ) -> List[Any]:
import torch
__snake_case : Dict = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
__snake_case : Optional[int] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_04}] )
@require_tf
def __snake_case ( self : Any ) -> Tuple:
__snake_case : List[Any] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
__snake_case : str = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.5_04}] )
@slow
@require_torch
def __snake_case ( self : int ) -> int:
__snake_case : Dict = pipeline("text-classification" )
__snake_case : Dict = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCamelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
__snake_case : int = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(lowerCamelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
__snake_case : str = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(lowerCamelCase ) , [{"label": "POSITIVE", "score": 0.9_88}] )
@slow
@require_tf
def __snake_case ( self : List[Any] ) -> str:
__snake_case : Optional[Any] = pipeline("text-classification" , framework="tf" )
__snake_case : Any = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCamelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
__snake_case : Optional[int] = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(lowerCamelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
__snake_case : Any = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(lowerCamelCase ) , [{"label": "POSITIVE", "score": 0.9_88}] )
def __snake_case ( self : str , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] ) -> List[Any]:
__snake_case : Union[str, Any] = TextClassificationPipeline(model=lowerCamelCase , tokenizer=lowerCamelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def __snake_case ( self : List[str] , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple ) -> str:
__snake_case : Tuple = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
__snake_case : Any = "HuggingFace is in"
__snake_case : Tuple = text_classifier(lowerCamelCase )
self.assertEqual(nested_simplify(lowerCamelCase ) , [{"label": ANY(lowerCamelCase ), "score": ANY(lowerCamelCase )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
__snake_case : Union[str, Any] = ["HuggingFace is in ", "Paris is in France"]
__snake_case : Union[str, Any] = text_classifier(lowerCamelCase )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [{"label": ANY(lowerCamelCase ), "score": ANY(lowerCamelCase )}, {"label": ANY(lowerCamelCase ), "score": ANY(lowerCamelCase )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
__snake_case : str = text_classifier(lowerCamelCase , top_k=lowerCamelCase )
__snake_case : int = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [[{"label": ANY(lowerCamelCase ), "score": ANY(lowerCamelCase )}] * N, [{"label": ANY(lowerCamelCase ), "score": ANY(lowerCamelCase )}] * N] , )
__snake_case : Optional[int] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
__snake_case : Optional[Any] = text_classifier(lowerCamelCase )
self.assertEqual(
nested_simplify(lowerCamelCase ) , {"label": ANY(lowerCamelCase ), "score": ANY(lowerCamelCase )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
__snake_case : Any = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(lowerCamelCase ):
text_classifier(lowerCamelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
__snake_case : List[Any] = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [{"label": ANY(lowerCamelCase ), "score": ANY(lowerCamelCase )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 203
| 0
|
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    """Return the list of primes below n, via a sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sum the semidivisible numbers not exceeding the limit."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
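
# Worked example (illustrative): for n = 10, sqrt(10) ~ 3.16, so lps(10) = 3 and
# ups(10) = 5. Since 10 is divisible by 5 but not by 3 -- exactly one of the two --
# it is semidivisible and contributes to the sum computed by `solution`.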
if __name__ == "__main__":
print(solution())
| 430
|
from manim import *
class lowercase_ (lowercase__ ):
def __UpperCamelCase ( self) -> List[Any]:
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
| 20
| 0
|
"""simple docstring"""
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 217
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 217
| 1
|
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 7
| 1
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        # Evict the least recently used key when the cache is full, then move
        # (or insert) x to the front of the deque.
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
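    # Trace at capacity 4: A, 2, 3 fill the cache; re-referring "A" moves it
    # to the front; 4 fills the last slot; referring 5 evicts the least
    # recently used key (2), leaving most-recent-first order [5, 4, 'A', 3].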
| 711
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
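# A hedged usage sketch (the vocab/merges paths are hypothetical; any
# BlenderbotSmall checkpoint layout with a vocab.json and merges.txt works):
#
#     tok = BlenderbotSmallTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#     pieces = tok.tokenize("sam is a great name")   # BPE pieces joined with "@@"
#     tok.convert_tokens_to_string(pieces)           # round-trips the text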
| 298
| 0
|
'''simple docstring'''
def count_divisors(n):
    """Count the divisors of n from its prime factorization."""
    n_divisors = 1
    i = 2

    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1

    if n > 1:
        n_divisors *= 2

    return n_divisors


def solution():
    """Return the first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1

    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break

    return t_num


if __name__ == "__main__":
    print(solution())
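    # Worked example for count_divisors: 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6
    # divisors (1, 2, 4, 7, 14, 28).
    assert count_divisors(28) == 6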
| 111
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
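# Hedged sketch (illustrative only, using the names reconstructed above): the
# ONNX export config is derived from a default BEiT config.
#
#     config = BeitConfig()
#     onnx_config = BeitOnnxConfig(config)
#     list(onnx_config.inputs)  # -> ["pixel_values"]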
| 111
| 1
|
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
def __call__( self : List[str] , UpperCamelCase_ : Union[str, Any] ):
with tf.name_scope(self.name or '''WarmUp''' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
lowerCAmelCase : Optional[int] = tf.cast(UpperCamelCase_ , tf.floataa )
lowerCAmelCase : int = tf.cast(self.warmup_steps , tf.floataa )
lowerCAmelCase : Optional[int] = global_step_float / warmup_steps_float
lowerCAmelCase : Optional[Any] = self.initial_learning_rate * tf.math.pow(UpperCamelCase_ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCamelCase_ , )
    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
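# Hedged usage sketch (hyperparameters are illustrative, not from this file):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5,
#         num_train_steps=10_000,
#         num_warmup_steps=1_000,
#         weight_decay_rate=0.01,
#     )
#
# The schedule warms up polynomially over the first 1_000 steps and then
# decays toward `init_lr * min_lr_ratio` over the remaining steps.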
class AdamWeightDecay(Adam):
    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate")

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config
    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )

        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ])
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
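# Hedged usage sketch of the accumulator (the training-loop names here are
# hypothetical, not from this file):
#
#     accumulator = GradientAccumulator()
#     for micro_batch in micro_batches:
#         grads = compute_gradients(micro_batch)  # hypothetical helper
#         accumulator(grads)
#     optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#     accumulator.reset()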
| 704
|
"""simple docstring"""
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
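# For the sample input above this prints [1, 2, 3, 9, 10, 13, 14].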
| 637
| 0
|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 93
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self) -> None:
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )

        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 64
| 0
|
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals(), ))
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals(), ))

    benchmark()
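    # Sanity check for both paths: the sample vectors differ by 3 in each of
    # three dimensions, so the distance is sqrt(27) ~= 5.196.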
| 720
|
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of parsed arguments on ArgumentParser instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42]), )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class WithLiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(WithLiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42]), )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]), )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True, )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True, )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
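# A hedged sketch of the pattern these tests exercise (BasicExample as
# defined above; the CLI strings are illustrative):
#
#     parser = HfArgumentParser(BasicExample)
#     (example,) = parser.parse_args_into_dataclasses(
#         ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"]
#     )
#     example.foo  # -> 1, coerced to int by the dataclass annotation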
| 406
| 0
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] =LayoutLMvaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# text + image
SCREAMING_SNAKE_CASE_ : Union[str, Any] =model(UpperCAmelCase__ , pixel_values=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict =model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str =model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] =model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE_ : Dict =model(UpperCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE_ : Optional[int] =model(pixel_values=UpperCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Dict =self.num_labels
SCREAMING_SNAKE_CASE_ : Dict =LayoutLMvaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict =model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_ : Any =LayoutLMvaForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] =model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Tuple =LayoutLMvaForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] =model(
UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : int =self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
pixel_values,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
SCREAMING_SNAKE_CASE_ : List[Any] ={
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowercase = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
return True
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : int =LayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE_ : List[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_ : Optional[int] =copy.deepcopy(UpperCAmelCase__ )
if model_class in get_values(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Dict ={
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(UpperCAmelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase__ ):
inputs_dict['labels'] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=torch_device )
elif model_class in get_values(UpperCAmelCase__ ):
inputs_dict['start_positions'] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
inputs_dict['end_positions'] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
elif model_class in [
*get_values(UpperCAmelCase__ ),
]:
inputs_dict['labels'] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
elif model_class in [
*get_values(UpperCAmelCase__ ),
]:
inputs_dict['labels'] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=torch_device , )
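# Note on shapes: sequence-level heads take labels of shape (batch_size,), while the
# token-classification head takes (batch_size, text_seq_length) -- only the text
# tokens carry labels; the image patch positions are never labeled.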
return inputs_dict
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Dict =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
@slow
def __lowerCamelCase ( self ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =LayoutLMvaModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCamelCase ( self ):
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.default_image_processor
SCREAMING_SNAKE_CASE_ : str =prepare_img()
SCREAMING_SNAKE_CASE_ : int =image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).pixel_values.to(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE_ : Any =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE_ : Tuple =model(
input_ids=input_ids.to(UpperCAmelCase__ ) , bbox=bbox.to(UpperCAmelCase__ ) , pixel_values=pixel_values.to(UpperCAmelCase__ ) , )
# verify the logits
SCREAMING_SNAKE_CASE_ : str =torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
| 220
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92
| 0
|
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
UpperCamelCase__ : List[str] = {"UserAgent": UserAgent().random}
def extract_user_profile ( script ):
"""simple docstring"""
data = script.contents[0]
info = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser :
def __init__( self , username):
self.url = f"""https://www.instagram.com/{username}/"""
self.user_data = self.get_json()
def get_json( self):
html = requests.get(self.url , headers=headers).text
scripts = BeautifulSoup(html , 'html.parser').find_all('script')
try:
return extract_user_profile(scripts[4])
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3])
def __repr__( self):
return f"""{self.__class__.__name__}('{self.username}')"""
def __str__( self):
return f"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def username( self):
return self.user_data["username"]
@property
def fullname( self):
return self.user_data["full_name"]
@property
def biography( self):
return self.user_data["biography"]
@property
def email( self):
return self.user_data["business_email"]
@property
def website( self):
return self.user_data["external_url"]
@property
def number_of_followers( self):
return self.user_data["edge_followed_by"]["count"]
@property
def number_of_followings( self):
return self.user_data["edge_follow"]["count"]
@property
def number_of_posts( self):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def profile_picture_url( self):
return self.user_data["profile_pic_url_hd"]
@property
def is_verified( self):
return self.user_data["is_verified"]
@property
def is_private( self):
return self.user_data["is_private"]
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str = "github" ):
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
instagram_user = InstagramUser(username )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ : List[Any] = InstagramUser("github")
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 620
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class __snake_case ( datasets.BuilderConfig ):
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
encoding_errors: Optional[str] = None
field: Optional[str] = None
use_threads: bool = True # deprecated
block_size: Optional[int] = None # deprecated
chunksize: int = 10 << 20 # 10MB
newlines_in_values: Optional[bool] = None
class __snake_case ( datasets.ArrowBasedBuilder ):
BUILDER_CONFIG_CLASS = JsonConfig
def lowerCAmelCase__ ( self):
if self.config.block_size is not None:
logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.')
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported')
return datasets.DatasetInfo(features=self.config.features)
def lowerCAmelCase__ ( self , _A):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files , (str, list, tuple)):
SCREAMING_SNAKE_CASE_ = data_files
if isinstance(files , str):
SCREAMING_SNAKE_CASE_ = [files]
SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})]
SCREAMING_SNAKE_CASE_ = []
for split_name, files in data_files.items():
if isinstance(files , str):
SCREAMING_SNAKE_CASE_ = [files]
SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files}))
return splits
def lowerCAmelCase__ ( self , _A):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema.field(column_name).type
SCREAMING_SNAKE_CASE_ = pa_table.append_column(column_name , pa.array([None] * len(pa_table) , type=type))
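# Illustrative example (hypothetical column name): if `features` declares an "id"
# column of type int64 that is absent from the file, a column of nulls is appended
# here so the cast to the full schema below can succeed.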
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE_ = table_cast(_A , self.config.features.arrow_schema)
return pa_table
def lowerCAmelCase__ ( self , _A):
for file_idx, file in enumerate(itertools.chain.from_iterable(_A)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
SCREAMING_SNAKE_CASE_ = json.load(f)
# We keep only the field we are interested in
SCREAMING_SNAKE_CASE_ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(dataset , (list, tuple)):
SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset])
SCREAMING_SNAKE_CASE_ = {col: [row.get(col) for row in dataset] for col in keys}
else:
SCREAMING_SNAKE_CASE_ = dataset
SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(mapping)
yield file_idx, self._cast_table(pa_table)
# If the file has one json object per line
else:
with open(file , 'rb') as f:
SCREAMING_SNAKE_CASE_ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
SCREAMING_SNAKE_CASE_ = max(self.config.chunksize // 32 , 16 << 10)
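# Worked example: with the default chunksize of 10 << 20 bytes (10 MiB), the block
# size starts at 10_485_760 // 32 = 327_680 bytes (~320 KiB), comfortably above the
# 16 << 10 (16 KiB) floor that protects very small chunk sizes.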
SCREAMING_SNAKE_CASE_ = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
SCREAMING_SNAKE_CASE_ = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(f)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
SCREAMING_SNAKE_CASE_ = batch.decode(self.config.encoding , errors=encoding_errors).encode('utf-8')
try:
while True:
try:
SCREAMING_SNAKE_CASE_ = paj.read_json(
io.BytesIO(batch) , read_options=paj.ReadOptions(block_size=block_size))
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(e , pa.ArrowInvalid)
and "straddling" not in str(e)
or block_size > len(batch)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"""Batch of {len(_A)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
file , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
SCREAMING_SNAKE_CASE_ = json.load(f)
except json.JSONDecodeError:
logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""")
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(dataset , list): # list is the only sequence type supported in JSON
try:
SCREAMING_SNAKE_CASE_ = set().union(*[row.keys() for row in dataset])
SCREAMING_SNAKE_CASE_ = {col: [row.get(col) for row in dataset] for col in keys}
SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict(mapping)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""")
raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
yield file_idx, self._cast_table(pa_table)
break
else:
logger.error(f"""Failed to read file '{file}' with error {type(_A)}: {e}""")
raise ValueError(
f"""Not able to read records in the JSON file at {file}. """
f"""You should probably indicate the field of the JSON file containing your records. """
f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
| 620
| 1
|
from math import ceil
def solution(n: int = 10_01) -> int:
"""Return the sum of the numbers on the diagonals of an n x n number spiral."""
total = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
odd = 2 * i + 1 # the top-right corner of ring i holds odd**2
even = 2 * i
total = total + 4 * odd**2 - 6 * even
return total
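# Worked example: for n = 5 the diagonals hold 1, 3, 5, 7, 9, 13, 17, 21 and 25,
# and solution(5) returns their sum, 101.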
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 364
|
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowercase : Dict =HfApi()
_lowercase : str ={}
# fmt: off
_lowercase : Union[str, Any] =torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_lowercase : Optional[Any] =torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_lowercase : Union[str, Any] =torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_lowercase : Optional[Any] =torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_lowercase : Tuple =torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_lowercase : int =torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_lowercase : Tuple =torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_lowercase : Optional[int] =torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_lowercase : List[Any] =torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_lowercase : List[Any] =torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_lowercase : int =torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_lowercase : List[Any] =torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_lowercase : Any =torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_lowercase : List[str] =torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_lowercase : Tuple =torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
_lowercase : Optional[int] =api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowercase : str ="""/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith("""CompVis"""):
_lowercase : str =UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
_lowercase : Dict =UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowercase : Optional[Any] =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowercase : Dict =torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowercase : Union[str, Any] =model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1e-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
| 364
| 1
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase__ = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCAmelCase__ = Features({'''audio''': Audio()} )
UpperCAmelCase__ = Features({'''transcription''': Value('''string''' )} )
UpperCAmelCase__ = "audio"
UpperCAmelCase__ = "transcription"
def snake_case__ ( self : Optional[Any] , lowercase__ : List[str] ) ->List[str]:
'''simple docstring'''
if self.audio_column not in features:
raise ValueError(f'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] , lowercase__ ):
raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' )
_UpperCamelCase : Tuple = copy.deepcopy(self )
_UpperCamelCase : Dict = self.input_schema.copy()
_UpperCamelCase : Union[str, Any] = features[self.audio_column]
_UpperCamelCase : Dict = input_schema
return task_template
@property
def snake_case__ ( self : Tuple ) ->Dict[str, str]:
'''simple docstring'''
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 204
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ : Any = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : int = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[Any] = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[int] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : str = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 204
| 1
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Optional[int] =logging.get_logger(__name__)
a__ : Optional[int] ='''▁'''
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
MAX_MODEL_INPUT_SIZES = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
MUSTC_LANGS = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
LANGUAGES = {'''mustc''': MUSTC_LANGS}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = MAX_MODEL_INPUT_SIZES
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: List[int] = []
def __init__( self : Union[str, Any] , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
__UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
__UpperCamelCase = do_upper_case
__UpperCamelCase = do_lower_case
__UpperCamelCase = load_json(vocab_file )
__UpperCamelCase = {v: k for k, v in self.encoder.items()}
__UpperCamelCase = spm_file
__UpperCamelCase = load_spm(spm_file , self.sp_model_kwargs )
if lang_codes is not None:
__UpperCamelCase = lang_codes
__UpperCamelCase = LANGUAGES[lang_codes]
__UpperCamelCase = [f'''<lang:{lang}>''' for lang in self.langs]
__UpperCamelCase = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs}
__UpperCamelCase = self.lang_tokens
__UpperCamelCase = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__UpperCamelCase = {}
@property
def vocab_size( self : str ):
return len(self.encoder )
@property
def tgt_lang( self : Dict ):
return self._tgt_lang
@tgt_lang.setter
def tgt_lang( self : Optional[int] , new_tgt_lang ):
__UpperCamelCase = new_tgt_lang
self.set_tgt_lang_special_tokens(new_tgt_lang )
def set_tgt_lang_special_tokens( self : Union[str, Any] , tgt_lang : str ):
__UpperCamelCase = self.lang_code_to_id[tgt_lang]
__UpperCamelCase = [lang_code_id]
def _tokenize( self : Any , text : str ):
return self.sp_model.encode(text , out_type=str )
def _convert_token_to_id( self : Dict , token ):
return self.encoder.get(token , self.encoder[self.unk_token] )
def _convert_id_to_token( self : Optional[int] , index : int ):
return self.decoder.get(index , self.unk_token )
def convert_tokens_to_string( self : str , tokens : List[str] ):
__UpperCamelCase = []
__UpperCamelCase = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__UpperCamelCase = self.sp_model.decode(current_sub_tokens )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__UpperCamelCase = []
else:
current_sub_tokens.append(token )
__UpperCamelCase = self.sp_model.decode(current_sub_tokens )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def build_inputs_with_special_tokens( self : str , token_ids_a , token_ids_b=None ):
if token_ids_b is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_b + [self.eos_token_id]
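# Illustrative example (token ids are made up): with prefix_tokens set to a single
# language id via the tgt_lang setter, the input [5, 6, 7] becomes
# [lang_code_id, 5, 6, 7, eos_token_id].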
def get_special_tokens_mask( self : List[str] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens )
__UpperCamelCase = [1] * len(self.prefix_tokens )
__UpperCamelCase = [1]
if token_ids_b is None:
return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
def get_vocab( self : Dict ):
__UpperCamelCase = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
__UpperCamelCase = self.__dict__.copy()
__UpperCamelCase = None
return state
def __setstate__( self : List[str] , d : Dict ):
__UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase = {}
__UpperCamelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def save_vocabulary( self : Union[str, Any] , save_directory : str , filename_prefix : Optional[str] = None ):
__UpperCamelCase = Path(save_directory )
assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
__UpperCamelCase = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , vocab_save_path )
if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , spm_save_path )
elif not os.path.isfile(self.spm_file ):
with open(spm_save_path , 'wb' ) as fi:
__UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (str(vocab_save_path ), str(spm_save_path ))
def load_spm ( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
"""simple docstring"""
__UpperCamelCase = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
spm.Load(str(path ) )
return spm
def load_json ( path : str ) -> Union[Dict, List]:
"""simple docstring"""
with open(path , 'r' ) as f:
return json.load(f )
def save_json ( data : Any , path : str ) -> None:
"""simple docstring"""
with open(path , 'w' ) as f:
json.dump(data , f , indent=2 )
| 399
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : List[str] ={
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] =['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[Any] =['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] =[
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str =[
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
a__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 399
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class a ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase : Tuple = """dpt"""
def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=3_8_4 , patch_size=1_6 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 1_1] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[9_6, 1_9_2, 3_8_4, 7_6_8] , fusion_hidden_size=2_5_6 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=2_5_5 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1_0_2_4, 2_4, 2_4] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ) -> Any:
super().__init__(**kwargs )
_a : Any = hidden_size
_a : str = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
backbone_config = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
_a : List[Any] = BitConfig(**backbone_config )
elif isinstance(backbone_config , dict ):
logger.info('Initializing the config with a `BiT` backbone.' )
_a : Optional[Any] = BitConfig(**backbone_config )
elif isinstance(backbone_config , PretrainedConfig ):
_a : Union[str, Any] = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
_a : Tuple = backbone_featmap_shape
_a : Dict = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
_a : List[Any] = None
_a : List[str] = None
_a : Optional[Any] = []
_a : int = num_hidden_layers
_a : Dict = num_attention_heads
_a : Tuple = intermediate_size
_a : Optional[int] = hidden_act
_a : List[Any] = hidden_dropout_prob
_a : str = attention_probs_dropout_prob
_a : Union[str, Any] = initializer_range
_a : Union[str, Any] = layer_norm_eps
_a : List[Any] = image_size
_a : Optional[Any] = patch_size
_a : Optional[Any] = num_channels
_a : Union[str, Any] = qkv_bias
_a : str = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
_a : Dict = readout_type
_a : Optional[int] = reassemble_factors
_a : Tuple = neck_hidden_sizes
_a : List[str] = fusion_hidden_size
_a : List[str] = head_in_index
_a : str = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_a : List[str] = use_auxiliary_head
_a : str = auxiliary_loss_weight
_a : Dict = semantic_loss_ignore_index
_a : int = semantic_classifier_dropout
def __UpperCamelCase ( self ) -> List[str]:
_a : Any = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_a : Any = self.backbone_config.to_dict()
_a : Optional[Any] = self.__class__.model_type
return output
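# Minimal usage sketch (illustrative; the attribute names follow the constructor
# above, and the serialized "model_type" key is an assumption based on the
# `model_type = "dpt"` class attribute):
# config = a(is_hybrid=True) # falls back to a default BiT backbone config
# d = config.to_dict() # nested backbone config is flattened to a plain dict
# assert d["backbone_config"] is not None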
| 424
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def UpperCAmelCase_ ( A , A ):
'''simple docstring'''
_a : List[str] = Mock()
_a : str = conn, Mock()
_a : Union[str, Any] = iter([1, None] )
_a : List[str] = lambda A : next(A )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=A )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 424
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class lowercase__ ( _UpperCAmelCase ):
a_ ="""openai/whisper-base"""
a_ =(
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
a_ ="""transcriber"""
a_ =WhisperProcessor
a_ =WhisperForConditionalGeneration
a_ =["""audio"""]
a_ =["""text"""]
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
return self.pre_processor(__UpperCAmelCase , return_tensors="pt" ).input_features
def UpperCAmelCase ( self , __UpperCAmelCase )-> Any:
'''simple docstring'''
return self.model.generate(inputs=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.pre_processor.batch_decode(__UpperCAmelCase , skip_special_tokens=True )[0]
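# Minimal usage sketch (illustrative; assumes the surrounding agent tooling wires
# encode/forward/decode together, as PipelineTool subclasses conventionally do):
# tool = lowercase__()
# features = tool.encode(raw_audio) # raw waveform accepted by WhisperProcessor
# tokens = tool.forward(features)
# text = tool.decode(tokens)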
| 339
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''CLIPFeatureExtractor''']
a_ = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 339
| 1
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = """sequence-classification"""
def __init__( self : List[Any] , hparams : List[Any] )-> Any:
if type(hparams ) == dict:
hparams = Namespace(**hparams )
hparams.glue_output_mode = glue_output_modes[hparams.task]
num_labels = glue_tasks_num_labels[hparams.task]
super().__init__(hparams , num_labels , self.mode )
def lowerCAmelCase ( self : int , **__snake_case : Tuple )-> List[Any]:
return self.model(**__snake_case )
def lowerCAmelCase ( self : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] )-> List[str]:
snake_case = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
snake_case = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
snake_case = self(**__snake_case )
snake_case = outputs[0]
snake_case = self.trainer.lr_schedulers[0]["""scheduler"""]
snake_case = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
snake_case = self.hparams
snake_case = processors[args.task]()
snake_case = processor.get_labels()
for mode in ["train", "dev"]:
snake_case = self._feature_file(__snake_case )
if os.path.exists(__snake_case ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __snake_case )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
snake_case = convert_examples_to_features(
__snake_case , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , __snake_case )
torch.save(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] , __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : Union[str, Any] = False )-> List[str]:
snake_case = """dev""" if mode == """test""" else mode
snake_case = self._feature_file(__snake_case )
logger.info("""Loading features from cached file %s""" , __snake_case )
snake_case = torch.load(__snake_case )
snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__snake_case , __snake_case , __snake_case , __snake_case ) , batch_size=__snake_case , shuffle=__snake_case , )
def lowerCAmelCase ( self : List[Any] , __snake_case : Optional[int] , __snake_case : Optional[int] )-> List[Any]:
snake_case = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
snake_case = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
snake_case = self(**__snake_case )
snake_case , snake_case = outputs[:2]
snake_case = logits.detach().cpu().numpy()
snake_case = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCAmelCase ( self : Optional[Any] , __snake_case : List[str] )-> str:
snake_case = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
snake_case = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
snake_case = np.argmax(__snake_case , axis=1 )
elif self.hparams.glue_output_mode == "regression":
snake_case = np.squeeze(__snake_case )
snake_case = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
snake_case = [[] for _ in range(out_label_ids.shape[0] )]
snake_case = [[] for _ in range(out_label_ids.shape[0] )]
snake_case = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __snake_case , __snake_case )}
snake_case = dict(results.items() )
snake_case = results
return ret, preds_list, out_label_list
def lowerCAmelCase ( self : Dict , __snake_case : List[str] )-> int:
snake_case , snake_case , snake_case = self._eval_end(__snake_case )
snake_case = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs ):
        ret , predictions , targets = self._eval_end(outputs )
        logs = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args( parser , root_dir ):
        BaseTransformer.add_model_specific_args(parser , root_dir )
parser.add_argument(
"""--max_seq_length""" , default=1_28 , type=__snake_case , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=__snake_case , required=__snake_case , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__snake_case , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
def main( ):
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            """./results""" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
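# A hypothetical invocation sketch (the script name, task, model id, and paths below
# are illustrative assumptions, not values taken from this file):
#   python run_pl_glue.py --task mrpc --model_name_or_path bert-base-cased \
#       --data_dir ./glue_data/MRPC --max_seq_length 128 --gpus 1 --do_predict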
| 716
|
'''simple docstring'''
def matching_min_vertex_cover ( graph : dict ) -> set:
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph )
    # While there are still edges left, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove all edges adjacent to from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
return chosen_vertices
def get_edges ( graph : dict ) -> set:
    edges = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 517
| 0
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase_ = False
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__( self : Dict ) ->List[str]:
snake_case_ = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger '''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCamelCase )
snake_case_ = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = generator.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def snake_case__( self : List[str] ) ->Tuple:
snake_case_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A painting of a squirrel eating a burger '''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
            prompt=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
        snake_case_ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
snake_case_ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 39
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester :
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : Dict=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=50 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=None , ) ->Union[str, Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = initializer_range
A__ = use_labels
A__ = scope
    def prepare_config_and_inputs( self ):
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A__ = self.get_config()
return config, input_ids, input_mask, token_labels
    def get_config( self ):
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
'''simple docstring'''
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[Any] , ) ->Dict:
'''simple docstring'''
        model = BertGenerationEncoder(config=UpperCAmelCase__)
        model.to(torch_device)
        model.eval()
        result = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__)
        result = model(UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder( self , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[Any] , ) ->Dict:
'''simple docstring'''
        A__ = True
        model = BertGenerationEncoder(config=UpperCAmelCase__)
        model.to(torch_device)
        model.eval()
        result = model(
            UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
        result = model(
            UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs( self , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] , ) ->Any:
'''simple docstring'''
A__ = True
A__ = True
A__ = BertGenerationDecoder(config=UpperCAmelCase__).to(UpperCAmelCase__).eval()
# first forward pass
A__ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
A__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size)
A__ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens] , dim=-1)
A__ = torch.cat([input_mask, next_mask] , dim=-1)
A__ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
A__ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1]).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3))
    def create_and_check_for_causal_lm( self , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , *UpperCAmelCase__ : List[str] , ) ->List[Any]:
'''simple docstring'''
        model = BertGenerationDecoder(UpperCAmelCase__)
        model.to(torch_device)
        model.eval()
        result = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp( self ):
'''simple docstring'''
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self , config_class=BertGenerationConfig , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]:
'''simple docstring'''
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        A__ = '''bert'''
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels)
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]:
'''simple docstring'''
A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''')
self.assertIsNotNone(UpperCAmelCase__)
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
'''simple docstring'''
A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''')
        A__ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
A__ = model(UpperCAmelCase__)[0]
        A__ = torch.Size([1, 8, 1024])
self.assertEqual(output.shape , UpperCAmelCase__)
A__ = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4))
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''')
        A__ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
A__ = model(UpperCAmelCase__)[0]
        A__ = torch.Size([1, 8, 50358])
self.assertEqual(output.shape , UpperCAmelCase__)
A__ = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4))
| 87
| 0
|
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour :
    def __init__( self ,img ,dst_width ,dst_height ):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("""Destination width/height should be > 0""" )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = self.out = (
            np.ones((self.dst_h, self.dst_w, 3) ,np.uint8 ) * 255
        )
    def process( self ):
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
    def get_x( self ,x ):
        return int(self.ratio_x * x )
    def get_y( self ,y ):
        return int(self.ratio_y * y )
if __name__ == "__main__":
    dst_w , dst_h = 800, 600
    im = imread("""image_data/lena.jpg""", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
    )
    waitKey(0)
    destroyAllWindows()
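# Minimal standalone check (a synthetic image instead of file I/O; purely illustrative):
# import numpy as np
# img = np.zeros((4, 4, 3), dtype=np.uint8)
# n = NearestNeighbour(img, 8, 8)
# n.process()
# assert n.output.shape == (8, 8, 3)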
| 720
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor ( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''CLIPImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__( self ,image_processor=None ,tokenizer=None ,**kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" ,FutureWarning ,)
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor ,tokenizer )
    def __call__( self ,text=None ,images=None ,return_tensors=None ,**kwargs ):
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text ,return_tensors=return_tensors ,**kwargs )
        if images is not None:
            image_features = self.image_processor(images ,return_tensors=return_tensors ,**kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) ,tensor_type=return_tensors )
    def batch_decode( self ,*args ,**kwargs ):
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode( self ,*args ,**kwargs ):
        return self.tokenizer.decode(*args ,**kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,FutureWarning ,)
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,FutureWarning ,)
        return self.image_processor
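# Minimal usage sketch (the checkpoint id and `pil_image` below are illustrative
# assumptions, not values taken from this file):
# from transformers import CLIPImageProcessor, CLIPTokenizerFast
# processor = CLIPProcessor(
#     image_processor=CLIPImageProcessor(),
#     tokenizer=CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32"),
# )
# batch = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")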
| 74
| 0
|
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__a: Any = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
__a: str = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
__a: Union[str, Any] = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute( self , predictions , references ):
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
| 152
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
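    """Return the words of the input string in reverse order.

    >>> _SCREAMING_SNAKE_CASE("I love Python")
    'Python love I'
    """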
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaInpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def time_input_dim( self ):
        return 32
    @property
    def block_out_channels_a( self ):
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        return 100
@property
    def dummy_unet( self ):
torch.manual_seed(0)
UpperCAmelCase_ = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase_ = UNetaDConditionModel(**_lowercase)
return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
torch.manual_seed(0)
UpperCAmelCase_ = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components( self ):
UpperCAmelCase_ = self.dummy_unet
UpperCAmelCase_ = self.dummy_movq
UpperCAmelCase_ = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_lowercase , )
UpperCAmelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
    def get_dummy_inputs( self , _lowercase :Any , _lowercase :int=0) -> int:
UpperCAmelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
_lowercase)
# create init_image
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
        UpperCAmelCase_ = Image.fromarray(np.uint8(_lowercase)).convert('''RGB''').resize((256, 256))
# create mask
        UpperCAmelCase_ = np.ones((64, 64) , dtype=np.float32)
UpperCAmelCase_ = 0
if str(_lowercase).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_lowercase)
else:
UpperCAmelCase_ = torch.Generator(device=_lowercase).manual_seed(_lowercase)
UpperCAmelCase_ = {
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
    def test_kandinsky_inpaint( self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}")
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''')
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
        mask = np.ones((768, 768) , dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = '''a hat'''
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image)
| 561
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class a_ ( BaseOutput ):
    sample: torch.FloatTensor
class a_ ( nn.Module ):
def __init__( self :Union[str, Any] , _lowercase :str=3 , _lowercase :List[str]=3 , _lowercase :Dict=("DownEncoderBlock2D",) , _lowercase :Optional[Any]=(64,) , _lowercase :Optional[Any]=2 , _lowercase :Tuple=32 , _lowercase :int="silu" , _lowercase :Union[str, Any]=True , ) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ = layers_per_block
UpperCAmelCase_ = torch.nn.Convad(
_lowercase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase_ = None
UpperCAmelCase_ = nn.ModuleList([])
# down
UpperCAmelCase_ = block_out_channels[0]
for i, down_block_type in enumerate(_lowercase):
UpperCAmelCase_ = output_channel
UpperCAmelCase_ = block_out_channels[i]
UpperCAmelCase_ = i == len(_lowercase) - 1
UpperCAmelCase_ = get_down_block(
_lowercase , num_layers=self.layers_per_block , in_channels=_lowercase , out_channels=_lowercase , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=_lowercase , resnet_groups=_lowercase , attention_head_dim=_lowercase , temb_channels=_lowercase , )
self.down_blocks.append(_lowercase)
# mid
UpperCAmelCase_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=_lowercase , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowercase , temb_channels=_lowercase , )
# out
UpperCAmelCase_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_lowercase , eps=1E-6)
UpperCAmelCase_ = nn.SiLU()
UpperCAmelCase_ = 2 * out_channels if double_z else out_channels
UpperCAmelCase_ = nn.Convad(block_out_channels[-1] , _lowercase , 3 , padding=1)
UpperCAmelCase_ = False
def __a ( self :Any , _lowercase :int) -> Optional[Any]:
UpperCAmelCase_ = x
UpperCAmelCase_ = self.conv_in(_lowercase)
if self.training and self.gradient_checkpointing:
def create_custom_forward(_lowercase :Dict):
def custom_forward(*_lowercase :Any):
return module(*_lowercase)
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0'''):
for down_block in self.down_blocks:
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(_lowercase) , _lowercase , use_reentrant=_lowercase)
# middle
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , _lowercase , use_reentrant=_lowercase)
else:
for down_block in self.down_blocks:
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(_lowercase) , _lowercase)
# middle
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block) , _lowercase)
else:
# down
for down_block in self.down_blocks:
UpperCAmelCase_ = down_block(_lowercase)
# middle
UpperCAmelCase_ = self.mid_block(_lowercase)
# post-process
UpperCAmelCase_ = self.conv_norm_out(_lowercase)
UpperCAmelCase_ = self.conv_act(_lowercase)
UpperCAmelCase_ = self.conv_out(_lowercase)
return sample
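# Note: when gradient checkpointing is enabled and the module is in training mode
# (the first branch of the forward pass above), block activations are recomputed
# during the backward pass, trading extra compute for a smaller activation-memory
# footprint; inference always takes the plain eager path in the else branch.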
class a_ ( nn.Module ):
def __init__( self :Union[str, Any] , _lowercase :Optional[Any]=3 , _lowercase :List[str]=3 , _lowercase :List[str]=("UpDecoderBlock2D",) , _lowercase :int=(64,) , _lowercase :Optional[Any]=2 , _lowercase :List[Any]=32 , _lowercase :Union[str, Any]="silu" , _lowercase :Optional[int]="group" , ) -> Any:
super().__init__()
UpperCAmelCase_ = layers_per_block
UpperCAmelCase_ = nn.Convad(
_lowercase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase_ = None
UpperCAmelCase_ = nn.ModuleList([])
UpperCAmelCase_ = in_channels if norm_type == '''spatial''' else None
# mid
UpperCAmelCase_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=_lowercase , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowercase , temb_channels=_lowercase , )
# up
UpperCAmelCase_ = list(reversed(_lowercase))
UpperCAmelCase_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_lowercase):
UpperCAmelCase_ = output_channel
UpperCAmelCase_ = reversed_block_out_channels[i]
UpperCAmelCase_ = i == len(_lowercase) - 1
UpperCAmelCase_ = get_up_block(
_lowercase , num_layers=self.layers_per_block + 1 , in_channels=_lowercase , out_channels=_lowercase , prev_output_channel=_lowercase , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=_lowercase , resnet_groups=_lowercase , attention_head_dim=_lowercase , temb_channels=_lowercase , resnet_time_scale_shift=_lowercase , )
self.up_blocks.append(_lowercase)
UpperCAmelCase_ = output_channel
# out
if norm_type == "spatial":
UpperCAmelCase_ = SpatialNorm(block_out_channels[0] , _lowercase)
else:
UpperCAmelCase_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_lowercase , eps=1E-6)
UpperCAmelCase_ = nn.SiLU()
UpperCAmelCase_ = nn.Convad(block_out_channels[0] , _lowercase , 3 , padding=1)
UpperCAmelCase_ = False
def __a ( self :Union[str, Any] , _lowercase :Dict , _lowercase :List[Any]=None) -> Any:
UpperCAmelCase_ = z
UpperCAmelCase_ = self.conv_in(_lowercase)
UpperCAmelCase_ = next(iter(self.up_blocks.parameters())).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_lowercase :str):
def custom_forward(*_lowercase :Any):
return module(*_lowercase)
return custom_forward
if is_torch_version('''>=''' , '''1.11.0'''):
# middle
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , _lowercase , _lowercase , use_reentrant=_lowercase)
UpperCAmelCase_ = sample.to(_lowercase)
# up
for up_block in self.up_blocks:
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(_lowercase) , _lowercase , _lowercase , use_reentrant=_lowercase)
else:
# middle
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , _lowercase , _lowercase)
UpperCAmelCase_ = sample.to(_lowercase)
# up
for up_block in self.up_blocks:
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(_lowercase) , _lowercase , _lowercase)
else:
# middle
UpperCAmelCase_ = self.mid_block(_lowercase , _lowercase)
UpperCAmelCase_ = sample.to(_lowercase)
# up
for up_block in self.up_blocks:
UpperCAmelCase_ = up_block(_lowercase , _lowercase)
# post-process
if latent_embeds is None:
UpperCAmelCase_ = self.conv_norm_out(_lowercase)
else:
UpperCAmelCase_ = self.conv_norm_out(_lowercase , _lowercase)
UpperCAmelCase_ = self.conv_act(_lowercase)
UpperCAmelCase_ = self.conv_out(_lowercase)
return sample
class a_ ( nn.Module ):
    def __init__( self , n_e , vq_embed_dim , beta , remap=None , unknown_index="random" , sane_index_shape=False , legacy=True) -> Any:
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e , self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e)
        self.remap = remap
        if self.remap is not None:
            self.register_buffer('''used''' , torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape
    def remap_to_used( self :Dict , _lowercase :Union[str, Any]) -> Tuple:
UpperCAmelCase_ = inds.shape
assert len(_lowercase) > 1
UpperCAmelCase_ = inds.reshape(ishape[0] , -1)
UpperCAmelCase_ = self.used.to(_lowercase)
UpperCAmelCase_ = (inds[:, :, None] == used[None, None, ...]).long()
UpperCAmelCase_ = match.argmax(-1)
UpperCAmelCase_ = match.sum(2) < 1
if self.unknown_index == "random":
UpperCAmelCase_ = torch.randint(0 , self.re_embed , size=new[unknown].shape).to(device=new.device)
else:
UpperCAmelCase_ = self.unknown_index
return new.reshape(_lowercase)
    def unmap_to_all( self :str , _lowercase :int) -> Optional[Any]:
UpperCAmelCase_ = inds.shape
assert len(_lowercase) > 1
UpperCAmelCase_ = inds.reshape(ishape[0] , -1)
UpperCAmelCase_ = self.used.to(_lowercase)
if self.re_embed > self.used.shape[0]: # extra token
UpperCAmelCase_ = 0 # simply set to zero
UpperCAmelCase_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _lowercase)
return back.reshape(_lowercase)
    def forward( self , z ):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0 , 2 , 3 , 1).contiguous()
        z_flattened = z.view(-1 , self.vq_embed_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened , self.embedding.weight) , dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0 , 3 , 1 , 2).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0] , -1) # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1 , 1) # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3])
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
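    # (the "preserve gradients" step above is the straight-through estimator: the
    # forward pass uses the quantized z_q, while gradients flow back to z as if
    # the quantization step were the identity)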
def __a ( self :Any , _lowercase :Tuple , _lowercase :Optional[Any]) -> int:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
UpperCAmelCase_ = indices.reshape(shape[0] , -1) # add batch axis
UpperCAmelCase_ = self.unmap_to_all(_lowercase)
UpperCAmelCase_ = indices.reshape(-1) # flatten again
# get quantized latent vectors
UpperCAmelCase_ = self.embedding(_lowercase)
if shape is not None:
UpperCAmelCase_ = z_q.view(_lowercase)
# reshape back to match original input shape
UpperCAmelCase_ = z_q.permute(0 , 3 , 1 , 2).contiguous()
return z_q
class a_ ( object ):
    def __init__( self , parameters , deterministic=False):
        self.parameters = parameters
        self.mean , self.logvar = torch.chunk(parameters , 2 , dim=1)
        self.logvar = torch.clamp(self.logvar , -30.0 , 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype)
    def sample( self , generator : Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape , generator=generator , device=self.parameters.device , dtype=self.parameters.dtype)
        x = self.mean + self.std * sample
        return x
    def kl( self , other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2) + self.var - 1.0 - self.logvar , dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )
    def nll( self , sample , dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2) / self.var , dim=dims)
    def mode( self ):
        return self.mean
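# The sample() method above is the standard reparameterization trick:
# x = mean + std * eps with eps ~ N(0, I), which keeps the draw stochastic
# while letting gradients flow through mean and std.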
| 561
| 1
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = """google/mobilebert-uncased"""
def _lowercase ( self ) -> Any:
'''simple docstring'''
super().setUp()
a__ : Any =[
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a__ : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a__ : Optional[Any] =[
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def _lowercase ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
a__ : List[str] ="UNwant\u00E9d,running"
a__ : List[Any] ="unwanted, running"
return input_text, output_text
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : List[str] =self.tokenizer_class(self.vocab_file )
a__ : List[str] =tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCAmelCase__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [9, 6, 7, 12, 10, 11] )
def _lowercase ( self ) -> str:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__ : Tuple =self.get_tokenizer()
a__ : str =self.get_rust_tokenizer()
a__ : Optional[int] ="UNwant\u00E9d,running"
a__ : Optional[int] =tokenizer.tokenize(lowerCAmelCase__ )
a__ : Dict =rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : int =tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
a__ : Any =rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : int =self.get_rust_tokenizer()
a__ : int =tokenizer.encode(lowerCAmelCase__ )
a__ : Union[str, Any] =rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# With lower casing
a__ : Optional[Any] =self.get_tokenizer(do_lower_case=lowerCAmelCase__ )
a__ : Optional[Any] =self.get_rust_tokenizer(do_lower_case=lowerCAmelCase__ )
a__ : Tuple ="UNwant\u00E9d,running"
a__ : Optional[int] =tokenizer.tokenize(lowerCAmelCase__ )
a__ : Tuple =rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Tuple =tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
a__ : Dict =rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : int =self.get_rust_tokenizer()
a__ : Optional[int] =tokenizer.encode(lowerCAmelCase__ )
a__ : Optional[Any] =rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : Any =BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : List[Any] =BasicTokenizer(do_lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : Tuple =BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ : int =BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Optional[int] =BasicTokenizer(do_lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] =BasicTokenizer(do_lower_case=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Dict =BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ : Tuple =BasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ : Any =BasicTokenizer(do_lower_case=lowerCAmelCase__ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : Any =["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
a__ : Union[str, Any] ={}
for i, token in enumerate(lowerCAmelCase__ ):
a__ : Dict =i
a__ : str =WordpieceTokenizer(vocab=lowerCAmelCase__ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _lowercase ( self ) -> Any:
'''simple docstring'''
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _lowercase ( self ) -> str:
'''simple docstring'''
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : List[str] =self.get_tokenizer()
a__ : Optional[int] =self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : str =self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
a__ : Union[str, Any] =tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
a__ : Any =tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
a__ : Union[str, Any] =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
a__ : Tuple =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : Tuple =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : List[Any] =F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , "do_lower_case" ) else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : Any =["的", "人", "有"]
a__ : List[str] ="".join(lowerCAmelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : List[str] =True
a__ : List[Any] =self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : Tuple =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : List[Any] =tokenizer_p.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
a__ : str =tokenizer_r.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
a__ : Union[str, Any] =tokenizer_r.convert_ids_to_tokens(lowerCAmelCase__ )
a__ : Optional[Any] =tokenizer_p.convert_ids_to_tokens(lowerCAmelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : List[str] =False
a__ : Any =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : List[str] =self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : Union[str, Any] =tokenizer_r.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
a__ : List[str] =tokenizer_p.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
a__ : List[str] =tokenizer_r.convert_ids_to_tokens(lowerCAmelCase__ )
a__ : str =tokenizer_p.convert_ids_to_tokens(lowerCAmelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
a__ : Dict =[
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase__ )
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 563
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
class __lowerCAmelCase ( SequenceFeatureExtractor):
    model_input_names = ["""input_values""", """attention_mask"""]
    def __init__( self , feature_size = 1 , sampling_rate = 16000 , padding_value = 0.0 , do_normalize = False , num_mel_bins = 80 , hop_length = 16 , win_length = 64 , win_function = "hann_window" , frame_signal_scale = 1.0 , fmin = 80 , fmax = 7600 , mel_floor = 1E-10 , reduction_factor = 2 , return_attention_mask = True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size , name=self.win_function , periodic=True )
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , FutureWarning , )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , FutureWarning , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values , attention_mask , padding_value = 0.0 ) -> List[np.ndarray]:
        '''simple docstring'''
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values
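    # Sketch of the normalization above (illustrative): for a vector v with valid
    # length L, out = (v - mean(v[:L])) / sqrt(var(v[:L]) + 1e-7), and positions
    # at index >= L are overwritten with `padding_value`.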
    def _extract_mel_features( self , one_waveform , ) -> np.ndarray:
        '''simple docstring'''
        log_mel_spec = spectrogram(
            one_waveform , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
        return log_mel_spec.T
def __call__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchFeature:
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if audio is not None:
a__ : Dict =self._process_audio(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , )
else:
a__ : str =None
if audio_target is not None:
a__ : int =self._process_audio(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , )
if inputs is None:
return inputs_target
else:
a__ : Any =inputs_target["input_values"]
a__ : List[Any] =inputs_target.get("attention_mask" )
if decoder_attention_mask is not None:
a__ : Optional[int] =decoder_attention_mask
return inputs
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchFeature:
'''simple docstring'''
a__ : List[Any] =isinstance(lowerCAmelCase__ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
a__ : Optional[int] =is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a__ : int =[np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
a__ : List[Any] =np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
a__ : Optional[Any] =speech.astype(np.floataa )
# always return batch
if not is_batched:
a__ : Union[str, Any] =[speech]
# needed to make pad() work on spectrogram inputs
a__ : Union[str, Any] =self.feature_size
# convert into correct format for padding
if is_target:
a__ : Dict =[self._extract_mel_features(lowerCAmelCase__ ) for waveform in speech]
a__ : str =BatchFeature({"input_values": features} )
a__ : List[str] =self.num_mel_bins
else:
a__ : List[str] =BatchFeature({"input_values": speech} )
a__ : Optional[int] =self.pad(
lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
a__ : Any =feature_size_hack
# convert input values to correct format
a__ : List[Any] =padded_inputs["input_values"]
if not isinstance(input_values[0] , np.ndarray ):
a__ : Union[str, Any] =[np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(lowerCAmelCase__ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
a__ : str =[array.astype(np.floataa ) for array in input_values]
elif isinstance(lowerCAmelCase__ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
a__ : Optional[int] =input_values.astype(np.floataa )
# convert attention_mask to correct format
a__ : str =padded_inputs.get("attention_mask" )
if attention_mask is not None:
a__ : str =[np.asarray(lowerCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
a__ : Union[str, Any] =(
attention_mask
if self._get_padding_strategies(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
a__ : List[Any] =self.zero_mean_unit_var_norm(
padded_inputs["input_values"] , attention_mask=lowerCAmelCase__ , padding_value=self.padding_value )
if return_tensors is not None:
a__ : int =padded_inputs.convert_to_tensors(lowerCAmelCase__ )
return padded_inputs
def _lowercase ( self ) -> Dict[str, Any]:
'''simple docstring'''
a__ : Optional[int] =super().to_dict()
# Don't serialize these as they are derived from the other properties.
a__ : Optional[Any] =["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
| 563
| 1
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'feature request',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="""closed""" )
elif (
days_since_updated > 2_3
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
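# Note: running this requires a GITHUB_TOKEN environment variable with access to
# the target repo; scripts like this are usually run unattended (e.g. on a CI
# schedule) rather than interactively.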
| 213
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = '▁'
__lowerCamelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCamelCase = {
'vocab_file': {
'facebook/mbart-large-50-one-to-many-mmt': (
'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
),
}
}
__lowerCamelCase = {
'facebook/mbart-large-50-one-to-many-mmt': 10_24,
}
# fmt: off
__lowerCamelCase = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = ["input_ids", "attention_mask"]
UpperCAmelCase__ = []
UpperCAmelCase__ = []
def __init__( self : int , __snake_case : str , __snake_case : Tuple=None , __snake_case : Dict=None , __snake_case : Union[str, Any]="</s>" , __snake_case : int="</s>" , __snake_case : int="<s>" , __snake_case : Tuple="<unk>" , __snake_case : List[str]="<pad>" , __snake_case : Tuple="<mask>" , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : List[Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__: Union[str, Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
__magic_name__: List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
__magic_name__: str = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__snake_case , tgt_lang=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
__magic_name__: str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__snake_case ) )
__magic_name__: Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__magic_name__: str = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__magic_name__: List[Any] = 1
__magic_name__: List[Any] = len(self.sp_model )
__magic_name__: Union[str, Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__snake_case )
}
__magic_name__: Any = {v: k for k, v in self.lang_code_to_id.items()}
__magic_name__: Optional[int] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__magic_name__: Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__magic_name__: Any = src_lang if src_lang is not None else """en_XX"""
__magic_name__: Dict = self.lang_code_to_id[self._src_lang]
__magic_name__: Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCamelCase__ ( self : List[str] ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCamelCase__ ( self : Optional[Any] ) -> str:
return self._src_lang
@src_lang.setter
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : str ) -> None:
__magic_name__: int = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : str ) -> Dict:
__magic_name__: int = self.__dict__.copy()
__magic_name__: List[str] = None
return state
def __setstate__( self : Any , __snake_case : Dict ) -> None:
__magic_name__: List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__magic_name__: Optional[Any] = {}
__magic_name__: List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
__magic_name__: List[Any] = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self : List[str] , __snake_case : str ) -> List[str]:
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def lowerCamelCase__ ( self : int , __snake_case : str ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__magic_name__: Optional[Any] = self.sp_model.PieceToId(__snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Optional[int] ) -> Union[str, Any]:
__magic_name__: str = []
__magic_name__: Dict = """"""
__magic_name__: Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__snake_case ) + token
__magic_name__: Dict = True
__magic_name__: Optional[Any] = []
else:
current_sub_tokens.append(__snake_case )
__magic_name__: Union[str, Any] = False
out_string += self.sp_model.decode(__snake_case )
return out_string.strip()
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__: Optional[int] = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , """wb""" ) as fi:
__magic_name__: str = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
def lowerCamelCase__ ( self : str , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
__magic_name__: List[Any] = [1] * len(self.prefix_tokens )
__magic_name__: Tuple = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase__ ( self : Any , __snake_case : Dict , __snake_case : str , __snake_case : Optional[str] , __snake_case : Optional[str] , **__snake_case : Tuple ) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__magic_name__: Union[str, Any] = src_lang
__magic_name__: int = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case )
__magic_name__: Union[str, Any] = self.convert_tokens_to_ids(__snake_case )
__magic_name__: int = tgt_lang_id
return inputs
def lowerCamelCase__ ( self : List[Any] , __snake_case : List[str] , __snake_case : str = "en_XX" , __snake_case : Optional[List[str]] = None , __snake_case : str = "ro_RO" , **__snake_case : List[Any] , ) -> BatchEncoding:
__magic_name__: List[Any] = src_lang
__magic_name__: List[Any] = tgt_lang
return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__ ( self : Any ) -> Tuple:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__ ( self : Any , __snake_case : str ) -> None:
__magic_name__: Any = self.lang_code_to_id[src_lang]
__magic_name__: str = [self.cur_lang_code_id]
__magic_name__: Tuple = [self.eos_token_id]
def lowerCamelCase__ ( self : Tuple , __snake_case : str ) -> None:
__magic_name__: int = self.lang_code_to_id[tgt_lang]
__magic_name__: Dict = [self.cur_lang_code_id]
__magic_name__: Optional[int] = [self.eos_token_id]
| 213
| 1
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 407
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
a__ = get_tests_dir('''fixtures/dummy-config.json''')
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : str ) -> Dict:
"""simple docstring"""
__UpperCamelCase : Optional[Any] = 0
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__UpperCamelCase : int = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase : Dict = AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase : Optional[int] = AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
__UpperCamelCase : List[Any] = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__UpperCamelCase : Union[str, Any] = os.path.join(lowerCAmelCase , """fake-roberta""" )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
__UpperCamelCase : Dict = AutoConfig.from_pretrained(lowerCAmelCase )
self.assertEqual(type(lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> str:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , lowerCAmelCase )
# Wrong model type will raise an error
with self.assertRaises(lowerCAmelCase ):
AutoConfig.register("""model""" , lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase ):
AutoConfig.register("""bert""" , lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase : Optional[Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase )
__UpperCamelCase : List[str] = AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
__UpperCamelCase : Tuple = AutoConfig.from_pretrained("""bert-base""" )
def lowerCamelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__UpperCamelCase : Optional[Any] = AutoConfig.from_pretrained(lowerCAmelCase , revision="""aaaaaa""" )
def lowerCamelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
__UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase ):
__UpperCamelCase : str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase ):
__UpperCamelCase : Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCAmelCase )
__UpperCamelCase : List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCAmelCase )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase )
__UpperCamelCase : str = AutoConfig.from_pretrained(lowerCAmelCase , trust_remote_code=lowerCAmelCase )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ):
"""simple docstring"""
__magic_name__ : int = 'new-model'
try:
AutoConfig.register("""new-model""" , lowerCAmelCase )
# If remote code is not set, the default is to use local
__UpperCamelCase : Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
__UpperCamelCase : Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCAmelCase )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
__UpperCamelCase : List[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCAmelCase )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 279
| 0
|
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: the best value achievable using the items from
    `index` onwards with `max_weight` capacity remaining."""
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it fits in the remaining capacity.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
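    # A minimal usage sketch (the numbers are illustrative, not from the original
    # source): with weights [1, 3, 4], values [15, 20, 30] and capacity 4, taking
    # items 0 and 1 is optimal (weight 4, value 35), so this prints 35.
    print(knapsack([1, 3, 4], [15, 20, 30], 3, 4, 0))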
| 386
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class A_ ( _a ):
lowerCAmelCase__ = 42
class A_ ( _a , _a ):
@register_to_config
def __init__( self: List[Any] ,__lowerCAmelCase: int = 65_536 ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: int = 0 ,__lowerCAmelCase: str = "fourier" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: float = 0.0 ,__lowerCAmelCase: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") ,__lowerCAmelCase: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") ,__lowerCAmelCase: Tuple[str] = "UNetMidBlock1D" ,__lowerCAmelCase: str = None ,__lowerCAmelCase: Tuple[int] = (32, 32, 64) ,__lowerCAmelCase: str = None ,__lowerCAmelCase: int = 8 ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: bool = False ,):
'''simple docstring'''
super().__init__()
_lowerCamelCase : List[str] = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase : Optional[Any] = GaussianFourierProjection(
embedding_size=8 ,set_W_to_weight=__lowerCAmelCase ,log=__lowerCAmelCase ,flip_sin_to_cos=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase : Any = Timesteps(
block_out_channels[0] ,flip_sin_to_cos=__lowerCAmelCase ,downscale_freq_shift=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase : str = block_out_channels[0] * 4
_lowerCamelCase : str = TimestepEmbedding(
in_channels=__lowerCAmelCase ,time_embed_dim=__lowerCAmelCase ,act_fn=__lowerCAmelCase ,out_dim=block_out_channels[0] ,)
_lowerCamelCase : int = nn.ModuleList([] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Tuple = nn.ModuleList([] )
_lowerCamelCase : List[str] = None
# down
_lowerCamelCase : List[Any] = in_channels
for i, down_block_type in enumerate(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = output_channel
_lowerCamelCase : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase : Tuple = i == len(__lowerCAmelCase ) - 1
_lowerCamelCase : List[Any] = get_down_block(
__lowerCAmelCase ,num_layers=__lowerCAmelCase ,in_channels=__lowerCAmelCase ,out_channels=__lowerCAmelCase ,temb_channels=block_out_channels[0] ,add_downsample=not is_final_block or downsample_each_block ,)
self.down_blocks.append(__lowerCAmelCase )
# mid
_lowerCamelCase : Optional[Any] = get_mid_block(
__lowerCAmelCase ,in_channels=block_out_channels[-1] ,mid_channels=block_out_channels[-1] ,out_channels=block_out_channels[-1] ,embed_dim=block_out_channels[0] ,num_layers=__lowerCAmelCase ,add_downsample=__lowerCAmelCase ,)
# up
_lowerCamelCase : Optional[int] = list(reversed(__lowerCAmelCase ) )
_lowerCamelCase : Tuple = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase : Tuple = out_channels
else:
_lowerCamelCase : Optional[Any] = block_out_channels[0]
for i, up_block_type in enumerate(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = output_channel
_lowerCamelCase : List[str] = (
reversed_block_out_channels[i + 1] if i < len(__lowerCAmelCase ) - 1 else final_upsample_channels
)
_lowerCamelCase : Union[str, Any] = i == len(__lowerCAmelCase ) - 1
_lowerCamelCase : Tuple = get_up_block(
__lowerCAmelCase ,num_layers=__lowerCAmelCase ,in_channels=__lowerCAmelCase ,out_channels=__lowerCAmelCase ,temb_channels=block_out_channels[0] ,add_upsample=not is_final_block ,)
self.up_blocks.append(__lowerCAmelCase )
_lowerCamelCase : Dict = output_channel
# out
_lowerCamelCase : Dict = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 ,32 )
_lowerCamelCase : List[Any] = get_out_block(
out_block_type=__lowerCAmelCase ,num_groups_out=__lowerCAmelCase ,embed_dim=block_out_channels[0] ,out_channels=__lowerCAmelCase ,act_fn=__lowerCAmelCase ,fc_dim=block_out_channels[-1] // 4 ,)
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : Dict = timestep
if not torch.is_tensor(__lowerCAmelCase ):
_lowerCamelCase : int = torch.tensor([timesteps] ,dtype=torch.long ,device=sample.device )
elif torch.is_tensor(__lowerCAmelCase ) and len(timesteps.shape ) == 0:
_lowerCamelCase : Optional[Any] = timesteps[None].to(sample.device )
_lowerCamelCase : Dict = self.time_proj(__lowerCAmelCase )
if self.config.use_timestep_embedding:
_lowerCamelCase : Any = self.time_mlp(__lowerCAmelCase )
else:
_lowerCamelCase : Optional[int] = timestep_embed[..., None]
_lowerCamelCase : int = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase : Any = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCamelCase : Any = ()
for downsample_block in self.down_blocks:
_lowerCamelCase, _lowerCamelCase : Dict = downsample_block(hidden_states=__lowerCAmelCase ,temb=__lowerCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase : Union[str, Any] = self.mid_block(__lowerCAmelCase ,__lowerCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase : Any = down_block_res_samples[-1:]
_lowerCamelCase : Tuple = down_block_res_samples[:-1]
_lowerCamelCase : str = upsample_block(__lowerCAmelCase ,res_hidden_states_tuple=__lowerCAmelCase ,temb=__lowerCAmelCase )
# 5. post-process
if self.out_block:
_lowerCamelCase : List[str] = self.out_block(__lowerCAmelCase ,__lowerCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=__lowerCAmelCase )
| 386
| 1
|
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch when the pattern is
        aligned at `current_pos`, or -1 if the pattern matches fully."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
_lowerCamelCase = """ABAABA"""
_lowerCamelCase = """AB"""
_lowerCamelCase = BoyerMooreSearch(text, pattern)
_lowerCamelCase = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
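# For text "ABAABA" and pattern "AB" the matches start at indices 0 and 3, so the
# script prints: Pattern found in following positions: [0, 3]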
| 71
|
'''simple docstring'''
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n, found with a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]  # 0 = presumed prime, 1 = composite
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f"{solution() = }")
| 474
| 0
|
'''simple docstring'''
def actual_power(a: int, b: int) -> int:
    """Compute a ** abs(b) by recursive squaring. Because int(b / 2) truncates
    toward zero, the recursion also terminates for negative b."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Extend actual_power to negative exponents."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
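    # power(-2, -3) evaluates 1 / ((-2) ** 3) = -0.125, so this prints -0.125.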
| 711
|
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname: str, overwrite: bool = False):
    """Sort the model mappings in one auto module file; return True when the file
    would change and `overwrite` is False."""
    with open(fname, 'r', encoding='utf-8') as f:
        content = f.read()

    lines = content.split('\n')
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r'^(\s*)\S', lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(' ' * indent + '('):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(' ' * indent + ')'):
                        line_idx += 1
                    blocks.append('\n'.join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, 'w', encoding='utf-8') as f:
            f.write('\n'.join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    """Apply sort_auto_mapping to every Python file in the auto module."""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith('.py')]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"""The following files have auto mappings that need sorting: {", ".join(failures)}. Run `make style` to fix"""
            ' this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
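# A usage sketch (the utils/ path is an assumption based on the usual repo layout):
#   python utils/sort_auto_mappings.py               # rewrite the mappings in place
#   python utils/sort_auto_mappings.py --check_only  # only raise if anything is unsorted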
| 433
| 0
|