from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    """Configuration class for RWKV models."""

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Optional sizes fall back to defaults derived from hidden_size.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
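# --- Usage sketch (added illustration, not part of the original module) ---
# A minimal, hedged example of how this config is typically consumed; it relies
# only on the class defined above and standard PretrainedConfig semantics. The
# derived defaults follow the rules in __init__.
if __name__ == "__main__":
    config = RwkvConfig(hidden_size=512, num_hidden_layers=12)
    assert config.attention_hidden_size == 512  # defaults to hidden_size
    assert config.intermediate_size == 4 * 512  # defaults to 4 * hidden_size
    # attribute_map aliases max_position_embeddings to context_length
    assert config.max_position_embeddings == config.context_length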
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
# Disable TF32 matmuls so the integration outputs below are reproducible.
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian probability density function at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
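# --- Worked example (added illustration, not part of the original module) ---
# At the mean of a standard normal (mu=0, sigma=1) the density equals
# 1 / sqrt(2 * pi) ~= 0.3989, and the curve is symmetric about mu.
if __name__ == "__main__":
    assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12
    assert abs(gaussian(1.0) - gaussian(-1.0)) < 1e-12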
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor that turns raw audio into Whisper-style log-mel spectrograms."""

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8_000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute the log-mel spectrogram of the given waveform, clamped and rescaled."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Normalize every array in the list to zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or several sequence(s) of raw audio into model inputs."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a dict, dropping the large (and recomputable) mel filter bank."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
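# --- Usage sketch (added illustration, not part of the original module) ---
# A minimal, hedged example of running the extractor on one second of random
# mono audio. With the defaults above, inputs are padded to 30 s, giving
# n_samples // hop_length == 3000 frames over 80 mel bins.
if __name__ == "__main__":
    extractor = WhisperFeatureExtractor()
    audio = np.random.randn(16_000).astype(np.float32)  # 1 s of noise at 16 kHz
    features = extractor(audio, sampling_rate=16_000, return_tensors="np")
    print(features["input_features"].shape)  # expected: (1, 80, 3000)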
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_ddpm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer, backed by the tokenizers library with a Jieba pre-tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested casing/accent options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        # Install the custom Jieba pre-tokenizer on the backend tokenizer.
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom (Jieba) pre-tokenizer cannot be pickled; swap in a standard one.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # The custom Jieba pre-tokenizer cannot be serialized; fall back to BERT's.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
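# --- Usage sketch (added illustration, not part of the original module) ---
# A minimal, hedged example; it assumes the checkpoint can be fetched from the
# Hugging Face Hub and uses only the class defined above.
if __name__ == "__main__":
    tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    encoded = tokenizer("今天天气非常好。")
    # Sequences are wrapped as [CLS] ... [SEP], per build_inputs_with_special_tokens
    assert encoded["input_ids"][0] == tokenizer.cls_token_id
    assert encoded["input_ids"][-1] == tokenizer.sep_token_id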
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in parallel:
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in series:
    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
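# --- Worked example (added illustration, not part of the original module) ---
# Two resistors of 10 and 20 ohms: in series they add to 30 ohms; in parallel
# the equivalent is 1 / (1/10 + 1/20) = 20/3 ~= 6.67 ohms.
if __name__ == "__main__":
    assert resistor_series([10, 20]) == 30
    assert abs(resistor_parallel([10, 20]) - 20 / 3) < 1e-12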
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place with pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
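# --- Added note (illustration, not part of the original module) ---
# Pigeonhole sort runs in O(n + value_range) time and O(value_range) extra
# space, so it suits arrays whose values span a small interval; a quick check:
if __name__ == "__main__":
    assert pigeon_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]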
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict


@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
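# --- Usage note (added illustration, not part of the original module) ---
# The command registers itself on the transformers-cli argument parser via
# register_subcommand above, so a (deprecated) invocation looks roughly like:
#
#   transformers-cli add-new-model
#
# or, in testing mode with a prepared cookiecutter configuration file:
#
#   transformers-cli add-new-model --testing --testing_file config.json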
import json
import os
import torch
from diffusers import UNet1DModel

os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")

    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # Rename the checkpoint's keys positionally onto the diffusers module names.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine transform that maps the points `pt1` onto `pt2`."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
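# --- Added sketch (illustration, not part of the original module) ---
# cv2.getAffineTransform solves for the 2x3 matrix M such that M @ [x, y, 1]^T
# maps each of three source points onto its destination. A hedged, pure-numpy
# version of that exact solve (the helper name is hypothetical):
def _affine_from_points(src: np.ndarray, dst: np.ndarray) -> np.ndarray:
    """Solve the 6x6 linear system given three (x, y) -> (x', y') point pairs."""
    a = np.zeros((6, 6))
    b = dst.reshape(6)  # [x0', y0', x1', y1', x2', y2']
    for i, (x, y) in enumerate(src):
        a[2 * i] = [x, y, 1, 0, 0, 0]      # row constraining x'
        a[2 * i + 1] = [0, 0, 0, x, y, 1]  # row constraining y'
    return np.linalg.solve(a, b).reshape(2, 3)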
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCAmelCase : Tuple = "http://www.mocksite.com/file1.txt"
__UpperCAmelCase : Tuple = "\"text\": [\"foo\", \"foo\"]"
__UpperCAmelCase : Optional[Any] = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class UpperCAmelCase_ :
'''simple docstring'''
__UpperCamelCase : str = 200
__UpperCamelCase : Optional[int] = {"Content-Length": "100"}
__UpperCamelCase : Dict = {}
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return [bytes(__SCREAMING_SNAKE_CASE , '''utf-8''' )]
def a ( *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
import requests
monkeypatch.setattr(SCREAMING_SNAKE_CASE_ , '''request''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = URL
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = url
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = [url]
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[int] = {'''train''': url}
UpperCamelCase : int = '''dummy'''
UpperCamelCase : str = '''downloads'''
UpperCamelCase : Dict = tmp_path
UpperCamelCase : Dict = DownloadConfig(
cache_dir=os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , use_etag=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Optional[int] = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = dl_manager.download(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : int = [downloaded_paths]
UpperCamelCase : int = [urls]
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert "train" in downloaded_paths.keys()
UpperCamelCase : Tuple = downloaded_paths.values()
UpperCamelCase : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
UpperCamelCase : Any = Path(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
UpperCamelCase : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
UpperCamelCase : List[str] = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
UpperCamelCase : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict ):
"""simple docstring"""
UpperCamelCase : Any = str(SCREAMING_SNAKE_CASE_ )
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[Any] = filename
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = [filename]
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = {'''train''': filename}
UpperCamelCase : Union[str, Any] = '''dummy'''
UpperCamelCase : int = xz_file.parent
UpperCamelCase : Optional[Any] = '''extracted'''
UpperCamelCase : Dict = DownloadConfig(
cache_dir=SCREAMING_SNAKE_CASE_ , use_etag=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Optional[Any] = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = dl_manager.extract(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = [extracted_paths]
UpperCamelCase : Any = [paths]
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert "train" in extracted_paths.keys()
UpperCamelCase : Tuple = extracted_paths.values()
UpperCamelCase : List[Any] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
UpperCamelCase : Any = Path(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(SCREAMING_SNAKE_CASE_ , etag=SCREAMING_SNAKE_CASE_ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
UpperCamelCase : Optional[int] = extracted_path.read_text()
UpperCamelCase : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
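

# Added usage sketch (not part of the original test file): the same DownloadManager
# API used outside of pytest. The URLs and cache path below are placeholders.
#
#   from datasets.download.download_config import DownloadConfig
#   from datasets.download.download_manager import DownloadManager
#
#   dl_manager = DownloadManager(download_config=DownloadConfig(cache_dir="./cache"))
#   local_path = dl_manager.download("https://example.com/data.jsonl")
#   extracted = dl_manager.extract(dl_manager.download("https://example.com/data.txt.xz"))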
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
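

# Added example (not in the original module): building a randomly initialized
# Conditional DETR model from this config.
#
#   from transformers import ConditionalDetrConfig, ConditionalDetrModel
#
#   config = ConditionalDetrConfig(num_queries=100, d_model=256)
#   model = ConditionalDetrModel(config)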
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__UpperCAmelCase : List[Any] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
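
# Added note: thanks to `_LazyModule`, importing this package is cheap; heavy
# submodules such as `modeling_clap` are only loaded on first attribute access,
# e.g. `from transformers.models.clap import ClapProcessor`.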
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
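
# Added worked example: for n=3, k=2 the recursion emits every 2-element
# combination of {1, 2, 3} in lexicographic order.
assert generate_all_combinations(3, 2) == [[1, 2], [1, 3], [2, 3]]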
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
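
# Added worked examples, matching the construction above:
assert twos_complement(0) == "0b0"
assert twos_complement(-1) == "0b11"
assert twos_complement(-5) == "0b1011"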
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # compare two initializers while ignoring their names
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
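

# Added usage sketch (the path is a placeholder):
#
#   optimized_path = remove_dup_initializers("exported/encoder.onnx")
#   # writes "exported/optimized_encoder.onnx" and returns its path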
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
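

# Added example (not in the original module): instantiating a YOLOS model
# from this config.
#
#   from transformers import YolosConfig, YolosForObjectDetection
#
#   config = YolosConfig(num_detection_tokens=100)
#   model = YolosForObjectDetection(config)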
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
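
# Added note: in RAG training setups this retriever is constructed like a regular
# `RagRetriever`, and each worker calls `retriever.init_retrieval(distributed_port)`
# once after `torch.distributed` is set up; only rank 0 loads the index, and
# `retrieve()` gathers/scatters query states and results over the gloo group.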
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
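
# Added worked example: 25 = 0b11001 and 32 = 0b100000, so
assert binary_or(25, 32) == "0b111001"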
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
__UpperCAmelCase : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
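
# Added example invocation (script name and file paths are placeholders):
#
#   python convert_funnel_checkpoint.py \
#       --tf_checkpoint_path ./funnel_tf/model.ckpt \
#       --config_file ./funnel_tf/config.json \
#       --pytorch_dump_path ./funnel_pt/pytorch_model.bin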
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn each k_size*k_size window into a row
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k,)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
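
# Added note: gen_gaussian_kernel(3, sigma=1) samples the 2D Gaussian on a 3x3 grid;
# its center value is 1 / (2 * pi) ≈ 0.159 since exp(0) = 1. The kernel is not
# normalized to sum to 1 (this one sums to about 0.78), so the filtered image
# comes out darker than the input.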
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split fused qkv projections into separate query/key/value weights
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__UpperCAmelCase : Dict = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
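
# Added example invocation (script name and output path are placeholders); the
# default --checkpoint_url converts the ViT-MAE base checkpoint:
#
#   python convert_vit_mae_to_pytorch.py --pytorch_dump_folder_path ./vit-mae-base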
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # only the local main process (index 0) should render the progress bar
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
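

# Added usage sketch: behaves like `tqdm.auto.tqdm`, except the first positional
# argument decides whether only the local main process renders the bar.
#
#   from accelerate.utils import tqdm
#
#   for batch in tqdm(True, dataloader):
#       ...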
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
__UpperCAmelCase : Tuple = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # the mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang: str = "eng_Latn", tgt_texts=None, tgt_lang: str = "fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
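

# Added example (not in the original module): translation-style tokenization with
# NLLB language codes.
#
#   from transformers import NllbTokenizerFast
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   inputs = tokenizer("Hello world", return_tensors="pt")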
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
import math
def perfect_square(num: int) -> bool:
    """Check for a perfect square via math.sqrt (may lose precision for huge ints)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check for a perfect square via binary search (exact integer arithmetic)."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
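
# Added worked examples: both variants agree on small inputs, but the float-based
# version can misclassify very large integers due to sqrt rounding.
assert perfect_square(16) is True
assert perfect_square_binary_search(16) is True
assert perfect_square_binary_search(10) is False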
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__UpperCAmelCase : List[Any] = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__UpperCAmelCase : List[str] = "UperNetConfig"
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 1 , ):
"""simple docstring"""
super().__init__()
UpperCamelCase : str = nn.Convad(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE , )
UpperCamelCase : int = nn.BatchNormad(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = nn.ReLU()
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = self.conv(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = self.batch_norm(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = self.activation(__SCREAMING_SNAKE_CASE )
return output
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : List[Any] = [
nn.AdaptiveAvgPoolad(__SCREAMING_SNAKE_CASE ),
UperNetConvModule(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = input
for layer in self.layers:
UpperCamelCase : int = layer(__SCREAMING_SNAKE_CASE )
return hidden_state
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : List[Any] = pool_scales
UpperCamelCase : Dict = align_corners
UpperCamelCase : Optional[int] = in_channels
UpperCamelCase : Union[str, Any] = channels
UpperCamelCase : List[str] = []
for i, pool_scale in enumerate(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = UperNetPyramidPoolingBlock(pool_scale=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , channels=__SCREAMING_SNAKE_CASE )
self.blocks.append(__SCREAMING_SNAKE_CASE )
self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = []
for ppm in self.blocks:
UpperCamelCase : List[str] = ppm(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = nn.functional.interpolate(
__SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(__SCREAMING_SNAKE_CASE )
return ppm_outs
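# --- Added sketch ---
# Each pyramid branch above pools the feature map to a fixed grid (the pool scale)
# and then bilinearly resizes back to the input resolution, so every branch output
# can be concatenated with the input. Standalone illustration with toy shapes:
import torch
from torch import nn

feats = torch.randn(1, 4, 16, 16)
for scale in (1, 2, 3, 6):  # e.g. config.pool_scales
    pooled = nn.AdaptiveAvgPool2d(scale)(feats)
    restored = nn.functional.interpolate(
        pooled, size=feats.shape[2:], mode="bilinear", align_corners=False
    )
    assert restored.shape == feats.shape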
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : int = config
UpperCamelCase : List[str] = config.pool_scales # e.g. (1, 2, 3, 6)
UpperCamelCase : Optional[int] = in_channels
UpperCamelCase : str = config.hidden_size
UpperCamelCase : str = False
UpperCamelCase : List[str] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
UpperCamelCase : Optional[int] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
UpperCamelCase : str = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
UpperCamelCase : Union[str, Any] = nn.ModuleList()
UpperCamelCase : Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
UpperCamelCase : List[Any] = UperNetConvModule(__SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 )
UpperCamelCase : int = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__SCREAMING_SNAKE_CASE )
self.fpn_convs.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def _lowercase ( self ):
"""simple docstring"""
self.apply(self._init_weights )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = inputs[-1]
UpperCamelCase : int = [x]
psp_outs.extend(self.psp_modules(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Any = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 )
UpperCamelCase : Union[str, Any] = self.bottleneck(__SCREAMING_SNAKE_CASE )
return output
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__SCREAMING_SNAKE_CASE ) )
# build top-down path
UpperCamelCase : int = len(__SCREAMING_SNAKE_CASE )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCamelCase : Optional[int] = laterals[i - 1].shape[2:]
UpperCamelCase : Optional[Any] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__SCREAMING_SNAKE_CASE , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
UpperCamelCase : str = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCamelCase : int = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
UpperCamelCase : str = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 )
UpperCamelCase : Tuple = self.fpn_bottleneck(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = self.classifier(__SCREAMING_SNAKE_CASE )
return output
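# --- Added sketch ---
# The top-down loop above adds each lateral to an upsampled copy of the next
# coarser level before the 3x3 FPN convolutions run. A two-level toy version of
# that fusion step:
import torch
from torch import nn

laterals = [torch.randn(1, 8, 32, 32), torch.randn(1, 8, 16, 16)]  # fine, coarse
laterals[0] = laterals[0] + nn.functional.interpolate(
    laterals[1], size=laterals[0].shape[2:], mode="bilinear", align_corners=False
)
assert laterals[0].shape == (1, 8, 32, 32)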
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 3 , __SCREAMING_SNAKE_CASE = 1 ):
"""simple docstring"""
super().__init__()
UpperCamelCase : Dict = config
UpperCamelCase : Optional[Any] = config.auxiliary_in_channels
UpperCamelCase : Union[str, Any] = config.auxiliary_channels
UpperCamelCase : Union[str, Any] = config.auxiliary_num_convs
UpperCamelCase : Optional[Any] = config.auxiliary_concat_input
UpperCamelCase : List[str] = in_index
UpperCamelCase : Any = (kernel_size // 2) * dilation
UpperCamelCase : Optional[Any] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) )
if self.num_convs == 0:
UpperCamelCase : str = nn.Identity()
else:
UpperCamelCase : Dict = nn.Sequential(*__SCREAMING_SNAKE_CASE )
if self.concat_input:
UpperCamelCase : Union[str, Any] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=kernel_size // 2 )
UpperCamelCase : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def _lowercase ( self ):
"""simple docstring"""
self.apply(self._init_weights )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = encoder_hidden_states[self.in_index]
UpperCamelCase : str = self.convs(__SCREAMING_SNAKE_CASE )
if self.concat_input:
UpperCamelCase : int = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
UpperCamelCase : Union[str, Any] = self.classifier(__SCREAMING_SNAKE_CASE )
return output
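# --- Added sketch ---
# The auxiliary head pads with (kernel_size // 2) * dilation, which keeps the
# spatial size unchanged for any odd kernel at any dilation; quick standalone
# check of that identity:
import torch
from torch import nn

for k, d in [(3, 1), (3, 2), (5, 3)]:
    conv = nn.Conv2d(4, 4, kernel_size=k, padding=(k // 2) * d, dilation=d)
    assert conv(torch.randn(1, 4, 17, 17)).shape[2:] == (17, 17)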
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = UperNetConfig
__UpperCamelCase : Optional[int] = "pixel_values"
__UpperCamelCase : Dict = True
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _lowercase ( self ):
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : str = value
__UpperCAmelCase : List[Any] = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase : Union[str, Any] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.", _a, )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
UpperCamelCase : int = UperNetHead(__SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels )
UpperCamelCase : int = UperNetFCNHead(__SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC )
def _lowercase ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
UpperCamelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions
UpperCamelCase : Tuple = self.backbone.forward_with_filtered_kwargs(
__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = outputs.feature_maps
UpperCamelCase : Union[str, Any] = self.decode_head(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = nn.functional.interpolate(__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = None
if self.auxiliary_head is not None:
UpperCamelCase : int = self.auxiliary_head(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = nn.functional.interpolate(
__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
UpperCamelCase : Optional[int] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
UpperCamelCase : Tuple = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
UpperCamelCase : Optional[Any] = (logits,) + outputs[1:]
else:
UpperCamelCase : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
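# --- Added sketch ---
# The forward pass above upsamples the head's logits to the input resolution before
# the loss; the matching post-processing step is a per-pixel argmax over labels.
# All shapes below are arbitrary toy values:
import torch
from torch import nn

logits = torch.randn(1, 21, 32, 32)  # (batch, num_labels, h', w')
logits = nn.functional.interpolate(logits, size=(128, 128), mode="bilinear", align_corners=False)
pred = logits.argmax(dim=1)          # (batch, 128, 128) label map
assert pred.shape == (1, 128, 128)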
| 643
| 1
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__UpperCAmelCase : Any = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''bs4'''] )
super().__init__(**__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = []
UpperCamelCase : Tuple = []
UpperCamelCase : Any = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCamelCase : Dict = parent.find_all(child.name , recursive=__SCREAMING_SNAKE_CASE )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__SCREAMING_SNAKE_CASE ) else next(i for i, s in enumerate(__SCREAMING_SNAKE_CASE , 1 ) if s is child ) )
UpperCamelCase : Optional[int] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = BeautifulSoup(__SCREAMING_SNAKE_CASE , '''html.parser''' )
UpperCamelCase : Tuple = []
UpperCamelCase : Optional[int] = []
UpperCamelCase : Tuple = []
for element in html_code.descendants:
if type(__SCREAMING_SNAKE_CASE ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCamelCase : List[Any] = html.unescape(__SCREAMING_SNAKE_CASE ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase : Dict = self.xpath_soup(__SCREAMING_SNAKE_CASE )
stringaxtag_seq.append(__SCREAMING_SNAKE_CASE )
stringaxsubs_seq.append(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = ''''''
for tagname, subs in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
xpath += f"""/{tagname}"""
if subs != 0:
xpath += f"""[{subs}]"""
return xpath
def __call__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = False
# Check that strings has a valid type
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = True
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
if len(__SCREAMING_SNAKE_CASE ) == 0 or isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = True
if not valid_strings:
raise ValueError(
'''HTML strings must be of type `str` or `List[str]` (batch of examples), '''
f"""but is of type {type(__SCREAMING_SNAKE_CASE )}.""" )
UpperCamelCase : Tuple = bool(isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(html_strings[0] , __SCREAMING_SNAKE_CASE )) )
if not is_batched:
UpperCamelCase : int = [html_strings]
# Get nodes + xpaths
UpperCamelCase : str = []
UpperCamelCase : Dict = []
for html_string in html_strings:
UpperCamelCase , UpperCamelCase , UpperCamelCase : str = self.get_three_from_single(__SCREAMING_SNAKE_CASE )
nodes.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = []
for node, tag_list, sub_list in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = self.construct_xpath(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
xpath_strings.append(__SCREAMING_SNAKE_CASE )
xpaths.append(__SCREAMING_SNAKE_CASE )
# return as Dict
UpperCamelCase : Optional[Any] = {'''nodes''': nodes, '''xpaths''': xpaths}
UpperCamelCase : str = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
return encoded_inputs
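# --- Added sketch ---
# The xpath builder above joins tag names with their 1-based sibling subscripts,
# where 0 means "only sibling" and is omitted. A standalone version of that rule
# (`build_xpath` is an illustrative name, not the class's method):
def build_xpath(tags, subs):
    xpath = ""
    for tagname, sub in zip(tags, subs):
        xpath += f"/{tagname}"
        if sub != 0:
            xpath += f"[{sub}]"
    return xpath

assert build_xpath(["html", "body", "div"], [0, 0, 2]) == "/html/body/div[2]"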
| 643
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__UpperCAmelCase : Optional[int] = 500000
__UpperCAmelCase , __UpperCAmelCase : Any = os.path.split(__file__)
__UpperCAmelCase : int = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase : Tuple = dataset.map(**SCREAMING_SNAKE_CASE_ )
@get_duration
def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
UpperCamelCase : int = dataset.filter(**SCREAMING_SNAKE_CASE_ )
def a ( ):
"""simple docstring"""
UpperCamelCase : Optional[int] = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase : Dict = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
UpperCamelCase : List[str] = generate_example_dataset(
os.path.join(SCREAMING_SNAKE_CASE_ , '''dataset.arrow''' ) , SCREAMING_SNAKE_CASE_ , num_examples=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=SCREAMING_SNAKE_CASE_ )
def tokenize(SCREAMING_SNAKE_CASE_ : Dict ):
return tokenizer(examples['''text'''] )
UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''numpy''' ):
UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''pandas''' ):
UpperCamelCase : int = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
UpperCamelCase : Dict = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , function=SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = filter(SCREAMING_SNAKE_CASE_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
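# --- Added sketch ---
# `get_duration` comes from a local utils module not shown here; a typical
# implementation (an assumption, not the actual helper) returns the wall-clock
# time of the wrapped call:
import functools
import time

def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start
    return wrapper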
| 643
| 1
|
import torch
from transformers import AutoModel
class UpperCAmelCase_ ( torch.nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super().__init__()
UpperCamelCase : List[str] = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
UpperCamelCase : List[Any] = torch.nn.Softmax(dim=1 )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = W_supports['''sizes'''].tolist()
UpperCamelCase : Optional[int] = W_supports['''start_token_id'''].item()
UpperCamelCase : Any = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCamelCase : Union[str, Any] = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = None
UpperCamelCase : Any = None
UpperCamelCase : Optional[Any] = W_supports['''input_ids'''] == start_token_id
UpperCamelCase : Any = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
UpperCamelCase : Optional[int] = 0
else:
UpperCamelCase : Tuple = support_sizes[i - 1]
UpperCamelCase : Tuple = S[s : s + size][start_token_masks[s : s + size]]
UpperCamelCase : List[str] = S[s : s + size][end_token_masks[s : s + size]]
UpperCamelCase : Dict = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCamelCase : Tuple = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCamelCase : List[str] = torch.vstack((p_starts, p_start) )
UpperCamelCase : Union[str, Any] = torch.vstack((p_ends, p_end) )
else:
UpperCamelCase : str = p_start
UpperCamelCase : Optional[int] = p_end
return p_starts, p_ends
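# --- Added sketch ---
# The scoring head above is softmax(T * cos(query, support)) with the cosine taken
# over the hidden dimension; a standalone shape/normalization check with toy
# tensors (all sizes arbitrary):
import torch

q = torch.randn(2, 5, 1, 8)  # (batch, query tokens, 1, hidden)
s = torch.randn(2, 1, 7, 8)  # (batch, 1, support tokens, hidden)
scores = torch.nn.Softmax(dim=1)(1 * torch.nn.CosineSimilarity(3, 1e-08)(q, s))
assert scores.shape == (2, 5, 7)
assert torch.allclose(scores.sum(dim=1), torch.ones(2, 7))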
| 643
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 643
| 1
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = 10
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = [1, 2, 3, 4]
UpperCamelCase : int = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__SCREAMING_SNAKE_CASE , self.block_size , 0 ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCamelCase : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__SCREAMING_SNAKE_CASE , self.block_size , 0 ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCamelCase : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__SCREAMING_SNAKE_CASE , self.block_size , 0 ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCamelCase , UpperCamelCase : List[str] = process_story(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , [] )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = ''''''
UpperCamelCase , UpperCamelCase : List[str] = process_story(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , [] )
self.assertEqual(__SCREAMING_SNAKE_CASE , [] )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCamelCase , UpperCamelCase : str = process_story(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = ['''It was the best of times.''']
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = torch.tensor([1, 2, 3, 4] )
UpperCamelCase : Tuple = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__SCREAMING_SNAKE_CASE , 0 ).numpy() , expected.numpy() )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
UpperCamelCase : int = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__SCREAMING_SNAKE_CASE , 23 ).numpy() , expected.numpy() )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
UpperCamelCase : int = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__SCREAMING_SNAKE_CASE , 1 ).numpy() , expected.numpy() )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = 101
UpperCamelCase : Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
UpperCamelCase : List[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
UpperCamelCase : Optional[int] = compute_token_type_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
np.testing.assert_array_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
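# --- Added sketch ---
# Consistent with the three test cases above, build_mask marks every non-pad
# position with 1 and pad positions with 0 -- the same semantics as this one-line
# torch expression (pad id 1, as in the last case):
import torch

sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
assert (sequence != 1).long().tolist() == [1, 1, 1, 1, 0, 0, 0]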
| 643
|
import torch
from transformers import AutoModel
class UpperCAmelCase_ ( torch.nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super().__init__()
UpperCamelCase : List[str] = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
UpperCamelCase : List[Any] = torch.nn.Softmax(dim=1 )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = W_supports['''sizes'''].tolist()
UpperCamelCase : Optional[int] = W_supports['''start_token_id'''].item()
UpperCamelCase : Any = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCamelCase : Union[str, Any] = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = None
UpperCamelCase : Any = None
UpperCamelCase : Optional[Any] = W_supports['''input_ids'''] == start_token_id
UpperCamelCase : Any = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
UpperCamelCase : Optional[int] = 0
else:
UpperCamelCase : Tuple = support_sizes[i - 1]
UpperCamelCase : Tuple = S[s : s + size][start_token_masks[s : s + size]]
UpperCamelCase : List[str] = S[s : s + size][end_token_masks[s : s + size]]
UpperCamelCase : Dict = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCamelCase : Tuple = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCamelCase : List[str] = torch.vstack((p_starts, p_start) )
UpperCamelCase : Union[str, Any] = torch.vstack((p_ends, p_end) )
else:
UpperCamelCase : str = p_start
UpperCamelCase : Optional[int] = p_end
return p_starts, p_ends
| 643
| 1
|
import math
import tensorflow as tf
from packaging import version
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase : List[Any] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase : List[str] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = tf.cast(math.pi , x.dtype )
UpperCamelCase : int = tf.cast(0.044715 , x.dtype )
UpperCamelCase : List[str] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(SCREAMING_SNAKE_CASE_ , 3 )) ))
return x * cdf
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
return x * tf.tanh(tf.math.softplus(SCREAMING_SNAKE_CASE_ ) )
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = tf.cast(0.044715 , x.dtype )
UpperCamelCase : Optional[Any] = tf.cast(0.7978845608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
UpperCamelCase : Tuple = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def a ( SCREAMING_SNAKE_CASE_ : Dict ):
"""simple docstring"""
return tf.clip_by_value(_gelu(SCREAMING_SNAKE_CASE_ ) , -1_0 , 1_0 )
def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int=-1 ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Optional[int] = tf.split(SCREAMING_SNAKE_CASE_ , 2 , axis=SCREAMING_SNAKE_CASE_ )
return a * tf.math.sigmoid(SCREAMING_SNAKE_CASE_ )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] ):
"""simple docstring"""
return tf.keras.activations.gelu(SCREAMING_SNAKE_CASE_ , approximate=SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase : str = tf.keras.activations.gelu
__UpperCAmelCase : str = approximate_gelu_wrap
else:
__UpperCAmelCase : Union[str, Any] = _gelu
__UpperCAmelCase : Union[str, Any] = _gelu_new
__UpperCAmelCase : str = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
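# --- Added sketch ---
# The exact (erf-based) gelu defined above should agree numerically with
# tf.nn.gelu(..., approximate=False); quick standalone check:
import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1.0])
manual = x * 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
assert bool(tf.reduce_all(tf.abs(manual - tf.nn.gelu(x, approximate=False)) < 1e-6))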
| 643
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : str = DebertaTokenizer
__UpperCamelCase : Optional[int] = True
__UpperCamelCase : Optional[int] = DebertaTokenizerFast
def _lowercase ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase : Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
UpperCamelCase : Tuple = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase : Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCamelCase : List[Any] = {'''unk_token''': '''[UNK]'''}
UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = '''lower newer'''
UpperCamelCase : Union[str, Any] = '''lower newer'''
return input_text, output_text
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.get_tokenizer()
UpperCamelCase : int = '''lower newer'''
UpperCamelCase : Union[str, Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCamelCase : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = tokens + [tokenizer.unk_token]
UpperCamelCase : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.get_tokenizer()
UpperCamelCase : Optional[Any] = tokenizer('''Hello''' , '''World''' )
UpperCamelCase : List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCamelCase : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
UpperCamelCase : Optional[int] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCamelCase : str = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
UpperCamelCase : Union[str, Any] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = [tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) for seq in encoding['''input_ids''']]
# fmt: off
UpperCamelCase : int = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
UpperCamelCase : List[str] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __SCREAMING_SNAKE_CASE )
for expected, decoded in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
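# --- Added note ---
# "\u0120" in the toy vocab and merges above is the GPT-2 byte-level marker for a
# leading space, rendered as "Ġ"; that is why word-initial pieces such as
# "\u0120lowest" carry it:
assert "\u0120" == "Ġ" and "\u0120lowest".startswith("Ġ")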
| 643
| 1
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__UpperCAmelCase : Optional[Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
__UpperCAmelCase : Optional[Any] = (
subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
)
__UpperCAmelCase : str = "|".join(sys.argv[1:])
__UpperCAmelCase : Tuple = re.compile(rf'''^({joined_dirs}).*?\.py$''')
__UpperCAmelCase : Optional[Any] = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 643
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
UpperCamelCase : str = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
UpperCamelCase : Optional[int] = input_file.read()
UpperCamelCase : Union[str, Any] = regexp.search(__SCREAMING_SNAKE_CASE )
return match
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
UpperCamelCase : Optional[int] = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
UpperCamelCase : Tuple = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCamelCase : Dict = regexp.finditer(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = Path('''./datasets''' )
UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__SCREAMING_SNAKE_CASE ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = Path('''./datasets''' )
UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_print_statements(str(__SCREAMING_SNAKE_CASE ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
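# --- Added sketch ---
# The open(...) pattern above flags calls that pass neither an encoding nor a
# binary/write mode keyword; standalone probe of the same regex:
import re

pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert pattern.search(" open(path)") is not None
assert pattern.search(" open(path, encoding='utf-8')") is None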
| 643
| 1
|
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(__SCREAMING_SNAKE_CASE ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
UpperCamelCase : list[float] = list(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = degree
def __add__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.degree > polynomial_a.degree:
UpperCamelCase : int = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , __SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : int = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , __SCREAMING_SNAKE_CASE )
def __sub__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self ):
"""simple docstring"""
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , __SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = ''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(__SCREAMING_SNAKE_CASE )
return polynomial
def __repr__( self ):
"""simple docstring"""
return self.__str__()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : list[float] = [0] * self.degree
for i in range(self.degree ):
UpperCamelCase : int = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , __SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE = 0 ):
"""simple docstring"""
UpperCamelCase : list[float] = [0] * (self.degree + 2)
UpperCamelCase : Optional[Any] = constant
for i in range(self.degree + 1 ):
UpperCamelCase : Optional[Any] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , __SCREAMING_SNAKE_CASE )
def __eq__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return not self.__eq__(__SCREAMING_SNAKE_CASE )
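# --- Added sketch ---
# Standalone check of the coefficient rules the calculus methods above implement:
# derivative coefficient i is c[i+1] * (i+1); antiderivative coefficient i+1 is
# c[i] / (i+1), with the integration constant in slot 0.
coeffs = [1.0, 2.0, 3.0]  # p(x) = 1 + 2x + 3x^2
derivative = [coeffs[i + 1] * (i + 1) for i in range(len(coeffs) - 1)]
assert derivative == [2.0, 6.0]  # p'(x) = 2 + 6x
antiderivative = [0.0] + [coeffs[i] / (i + 1) for i in range(len(coeffs))]
assert antiderivative == [0.0, 1.0, 1.0, 1.0]  # P(x) = x + x^2 + x^3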
| 643
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCAmelCase_ :
'''simple docstring'''
__UpperCamelCase : Any = XGLMConfig
__UpperCamelCase : Dict = {}
__UpperCamelCase : List[str] = "gelu"
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=14 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=0.02 , ):
"""simple docstring"""
UpperCamelCase : Any = parent
UpperCamelCase : Optional[int] = batch_size
UpperCamelCase : str = seq_length
UpperCamelCase : List[str] = is_training
UpperCamelCase : Tuple = use_input_mask
UpperCamelCase : Union[str, Any] = use_labels
UpperCamelCase : int = vocab_size
UpperCamelCase : Optional[int] = d_model
UpperCamelCase : Any = num_hidden_layers
UpperCamelCase : List[str] = num_attention_heads
UpperCamelCase : Optional[Any] = ffn_dim
UpperCamelCase : Optional[int] = activation_function
UpperCamelCase : List[str] = activation_dropout
UpperCamelCase : Any = attention_dropout
UpperCamelCase : str = max_position_embeddings
UpperCamelCase : Union[str, Any] = initializer_range
UpperCamelCase : int = None
UpperCamelCase : Dict = 0
UpperCamelCase : int = 2
UpperCamelCase : Any = 1
def _lowercase ( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCamelCase : int = None
if self.use_input_mask:
UpperCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Tuple = self.get_config()
UpperCamelCase : str = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowercase ( self ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__SCREAMING_SNAKE_CASE , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = config_and_inputs
UpperCamelCase : List[str] = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _a, _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__UpperCamelCase : Union[str, Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__UpperCamelCase : Any = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : List[Any] = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = TFXGLMModelTester(self )
UpperCamelCase : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , n_embd=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : List[Any] = TFXGLMModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def _lowercase ( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@slow
def _lowercase ( self , __SCREAMING_SNAKE_CASE=True ):
"""simple docstring"""
UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : List[Any] = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase : str = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
UpperCamelCase : Union[str, Any] = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
UpperCamelCase : Tuple = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
UpperCamelCase : int = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
UpperCamelCase : str = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , seed=[7, 0] )
UpperCamelCase : Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : Tuple = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : Tuple = '''left'''
# use different length sentences to test batching
UpperCamelCase : Any = [
'''This is an extremely long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
UpperCamelCase : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' , padding=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = inputs['''input_ids''']
UpperCamelCase : Optional[int] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
UpperCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
UpperCamelCase : Optional[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 )
UpperCamelCase : str = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
UpperCamelCase : List[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 )
UpperCamelCase : Any = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = [
'''This is an extremely long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence] )
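# --- Added sketch ---
# The batching test above depends on left padding so that generated tokens line up
# at the right edge of every row; the construction in miniature (pad id 0 is an
# arbitrary choice here):
pad_id = 0
seqs = [[5, 6, 7, 8, 9], [5, 6]]
width = max(len(s) for s in seqs)
input_ids = [[pad_id] * (width - len(s)) + s for s in seqs]
attention_mask = [[0] * (width - len(s)) + [1] * len(s) for s in seqs]
assert input_ids[1] == [0, 0, 0, 5, 6]
assert attention_mask[1] == [0, 0, 0, 1, 1]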
| 643
| 1
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(SCREAMING_SNAKE_CASE_ , '''_dynamo''' ):
return False
return isinstance(SCREAMING_SNAKE_CASE_ , torch._dynamo.eval_frame.OptimizedModule )
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : bool = True ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
UpperCamelCase : int = is_compiled_module(SCREAMING_SNAKE_CASE_ )
if is_compiled:
UpperCamelCase : Any = model
UpperCamelCase : Union[str, Any] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = model.module
if not keep_fpaa_wrapper:
UpperCamelCase : List[str] = getattr(SCREAMING_SNAKE_CASE_ , '''forward''' )
UpperCamelCase : Optional[Any] = model.__dict__.pop('''_original_forward''' , SCREAMING_SNAKE_CASE_ )
if original_forward is not None:
while hasattr(SCREAMING_SNAKE_CASE_ , '''__wrapped__''' ):
UpperCamelCase : int = forward.__wrapped__
if forward == original_forward:
break
UpperCamelCase : Union[str, Any] = forward
if getattr(SCREAMING_SNAKE_CASE_ , '''_converted_to_transformer_engine''' , SCREAMING_SNAKE_CASE_ ):
convert_model(SCREAMING_SNAKE_CASE_ , to_transformer_engine=SCREAMING_SNAKE_CASE_ )
if is_compiled:
UpperCamelCase : Union[str, Any] = model
UpperCamelCase : Dict = compiled_model
return model
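# --- Added sketch ---
# The unwrap loop above peels wrapper layers (DDP, DataParallel, DeepSpeed engine)
# via their `.module` attribute until the user's model is reached; the same idea
# in miniature with a stand-in wrapper class:
class Wrapper:
    def __init__(self, module):
        self.module = module

inner = object()
model = Wrapper(Wrapper(inner))
while isinstance(model, Wrapper):
    model = model.module
assert model is inner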
def a ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif PartialState().local_process_index == 0:
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@contextmanager
def a ( **SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
for key, value in kwargs.items():
UpperCamelCase : Optional[int] = str(SCREAMING_SNAKE_CASE_ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def a ( SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
if not hasattr(SCREAMING_SNAKE_CASE_ , '''__qualname__''' ) and not hasattr(SCREAMING_SNAKE_CASE_ , '''__name__''' ):
UpperCamelCase : str = getattr(SCREAMING_SNAKE_CASE_ , '''__class__''' , SCREAMING_SNAKE_CASE_ )
if hasattr(SCREAMING_SNAKE_CASE_ , '''__qualname__''' ):
return obj.__qualname__
if hasattr(SCREAMING_SNAKE_CASE_ , '''__name__''' ):
return obj.__name__
return str(SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ):
"""simple docstring"""
for key, value in source.items():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = destination.setdefault(SCREAMING_SNAKE_CASE_ , {} )
merge_dicts(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : Optional[Any] = value
return destination
def a ( SCREAMING_SNAKE_CASE_ : int = None ):
"""simple docstring"""
if port is None:
        UpperCamelCase : Dict = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0
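# Illustrative sketch (not part of the upstream module): a minimal, self-contained
# restatement of the environment-patching context manager defined above, since the
# obfuscated ``def a`` names in this file shadow one another and cannot be called
# directly. Behavior mirrors the original: upper-cased keys are set on entry and
# removed again on exit. It reuses the module-level ``os`` and ``contextmanager``.
if __name__ == "__main__":
    @contextmanager
    def patch_environment_demo(**kwargs):
        for key, value in kwargs.items():
            os.environ[key.upper()] = str(value)
        yield
        for key in kwargs:
            os.environ.pop(key.upper(), None)
    with patch_environment_demo(master_port=29_501):
        assert os.environ["MASTER_PORT"] == "29501"
    assert "MASTER_PORT" not in os.environ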
| 643
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : int = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : str = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
__UpperCAmelCase : Union[str, Any] = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
__UpperCAmelCase : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
__UpperCamelCase : Any = MBartTokenizer
__UpperCamelCase : List[int] = []
__UpperCamelCase : List[int] = []
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = vocab_file
UpperCamelCase : List[str] = False if not self.vocab_file else True
UpperCamelCase : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
UpperCamelCase : List[Any] = {
lang_code: self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCamelCase : Dict = src_lang if src_lang is not None else '''en_XX'''
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCamelCase : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowercase ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : str = [self.sep_token_id]
UpperCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCamelCase : List[str] = src_lang
UpperCamelCase : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = tgt_lang_id
return inputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = src_lang
UpperCamelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = []
UpperCamelCase : Dict = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = []
UpperCamelCase : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCamelCase : Optional[int] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
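# Illustrative usage sketch (not part of the original module): how the
# src_lang / tgt_lang machinery above is typically driven through the public
# ``MBartTokenizerFast`` API. This is a sketch; it assumes network access to
# fetch the checkpoint and is not exercised by this module itself.
if __name__ == "__main__":
    from transformers import MBartTokenizerFast
    tok = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    batch = tok("UN Chief Says There Is No Military Solution in Syria")
    # The suffix tokens configured above are [eos, language code], so the
    # encoding ends with ``</s> en_XX``.
    print(tok.convert_ids_to_tokens(batch["input_ids"])[-2:])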
| 643
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Tuple = {"vocab_file": "spiece.model"}
__UpperCAmelCase : List[Any] = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<sep>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<cls>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=["<eop>", "<eod>"] , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
UpperCamelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = 3
UpperCamelCase : str = do_lower_case
UpperCamelCase : Optional[Any] = remove_space
UpperCamelCase : List[Any] = keep_accents
UpperCamelCase : Optional[int] = vocab_file
UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
UpperCamelCase : str = jieba
UpperCamelCase : Tuple = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowercase ( self ):
"""simple docstring"""
return len(self.sp_model )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase : Tuple = self.__dict__.copy()
UpperCamelCase : Tuple = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase : Any = {}
UpperCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.remove_space:
UpperCamelCase : Tuple = ''' '''.join(inputs.strip().split() )
else:
UpperCamelCase : Any = inputs
UpperCamelCase : List[Any] = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
UpperCamelCase : List[Any] = unicodedata.normalize('''NFKD''' , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = ''''''.join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
UpperCamelCase : Union[str, Any] = outputs.lower()
return outputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.preprocess_text(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = []
for piece in pieces:
if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
UpperCamelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCamelCase : str = cur_pieces[1:]
else:
UpperCamelCase : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__SCREAMING_SNAKE_CASE )
else:
new_pieces.append(__SCREAMING_SNAKE_CASE )
return new_pieces
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1]
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = [self.sep_token_id]
UpperCamelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase : Optional[Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
UpperCamelCase : int = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = super()._decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
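# Illustrative sketch (not part of the original module): the tokenizer above maps
# spaces and newlines to the placeholders U+2582/U+2583 before SentencePiece sees
# the text (via ``str.maketrans`` in ``__init__``), and ``_decode`` reverses the
# mapping. The translation step in isolation:
if __name__ == "__main__":
    table = str.maketrans(" \n", "\u2582\u2583")
    encoded = "Hello world\nBye".translate(table)
    assert " " not in encoded and "\n" not in encoded
    decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
    assert decoded == "Hello world\nBye"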
| 643
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCAmelCase : Dict = False
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCamelCase : str = torch.manual_seed(0 )
UpperCamelCase : Union[str, Any] = pipe(
image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCamelCase : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : Dict = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 643
| 1
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ):
"""simple docstring"""
UpperCamelCase : Tuple = parent
UpperCamelCase : Any = batch_size
UpperCamelCase : str = seq_length
UpperCamelCase : Dict = is_training
UpperCamelCase : Optional[int] = use_attention_mask
UpperCamelCase : Optional[int] = use_token_type_ids
UpperCamelCase : Tuple = use_labels
UpperCamelCase : Optional[Any] = vocab_size
UpperCamelCase : Optional[int] = hidden_size
UpperCamelCase : Tuple = num_hidden_layers
UpperCamelCase : Optional[Any] = num_attention_heads
UpperCamelCase : Dict = intermediate_size
UpperCamelCase : int = hidden_act
UpperCamelCase : Optional[int] = hidden_dropout_prob
UpperCamelCase : Any = attention_probs_dropout_prob
UpperCamelCase : int = max_position_embeddings
UpperCamelCase : List[str] = type_vocab_size
UpperCamelCase : Optional[Any] = type_sequence_label_size
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : Tuple = num_choices
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : List[Any] = None
if self.use_attention_mask:
UpperCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Tuple = None
if self.use_token_type_ids:
UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase : Any = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = config_and_inputs
UpperCamelCase : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = config_and_inputs
UpperCamelCase : Optional[Any] = True
UpperCamelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase_ ( _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : int = True
__UpperCamelCase : Union[str, Any] = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
)
if is_flax_available()
else ()
)
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = FlaxBertModelTester(self )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = FlaxBertModel.from_pretrained('''bert-base-cased''' )
UpperCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 643
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Any = ["input_features"]
def __init__( self , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=16_000 , __SCREAMING_SNAKE_CASE=160 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = n_fft
UpperCamelCase : Dict = hop_length
UpperCamelCase : Dict = chunk_length
UpperCamelCase : List[str] = chunk_length * sampling_rate
UpperCamelCase : Dict = self.n_samples // hop_length
UpperCamelCase : str = sampling_rate
UpperCamelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[str] = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
UpperCamelCase : int = log_spec[:, :-1]
UpperCamelCase : int = np.maximum(__SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
UpperCamelCase : Any = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
UpperCamelCase : List[Any] = np.array(__SCREAMING_SNAKE_CASE , np.intaa )
UpperCamelCase : Optional[Any] = []
for vector, length in zip(__SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
UpperCamelCase : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
UpperCamelCase : Optional[int] = padding_value
normed_input_values.append(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "max_length" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCamelCase : Tuple = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : Union[str, Any] = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
UpperCamelCase : int = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : Optional[int] = [np.asarray([raw_speech] ).T]
UpperCamelCase : Optional[int] = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
UpperCamelCase : Optional[Any] = self.pad(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
UpperCamelCase : Optional[Any] = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
UpperCamelCase : List[str] = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
UpperCamelCase : Dict = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
UpperCamelCase : Tuple = [self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[int] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
else:
UpperCamelCase : Dict = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
UpperCamelCase : Union[str, Any] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
UpperCamelCase : Dict = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCamelCase : List[str] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
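# Illustrative sketch (not part of the original module): the dynamic-range
# compression applied to the log-mel spectrogram above, i.e. clamp to within
# 8.0 of the maximum, then shift and scale into roughly [-1, 1].
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    log_spec = rng.uniform(-12.0, 0.0, size=(80, 3_000))
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
    log_spec = (log_spec + 4.0) / 4.0
    print(float(log_spec.min()), float(log_spec.max()))  # bounded after clamp + scale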
| 643
| 1
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def a ( SCREAMING_SNAKE_CASE_ : Union[dict, list, tuple, torch.Tensor] ):
"""simple docstring"""
UpperCamelCase : Tuple = []
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for v in tree.values():
shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE_ ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE_ ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('''Not supported''' )
return shapes
@torch.jit.ignore
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple[int, ...] ):
"""simple docstring"""
UpperCamelCase : Dict = []
for d in reversed(SCREAMING_SNAKE_CASE_ ):
idx.append(flat_idx % d )
UpperCamelCase : Optional[Any] = flat_idx // d
return tuple(reversed(SCREAMING_SNAKE_CASE_ ) )
@torch.jit.ignore
def a ( SCREAMING_SNAKE_CASE_ : Sequence[int] , SCREAMING_SNAKE_CASE_ : Sequence[int] , SCREAMING_SNAKE_CASE_ : Sequence[int] , SCREAMING_SNAKE_CASE_ : Optional[Sequence[bool]] = None , SCREAMING_SNAKE_CASE_ : Optional[Sequence[bool]] = None , ):
"""simple docstring"""
def reduce_edge_list(SCREAMING_SNAKE_CASE_ : List[bool] ) -> None:
UpperCamelCase : Optional[int] = True
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
UpperCamelCase : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase : Optional[int] = l[reversed_idx]
if start_edges is None:
UpperCamelCase : Any = [s == 0 for s in start]
reduce_edge_list(SCREAMING_SNAKE_CASE_ )
if end_edges is None:
UpperCamelCase : Union[str, Any] = [e == (d - 1) for e, d in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
reduce_edge_list(SCREAMING_SNAKE_CASE_ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return [()]
elif len(SCREAMING_SNAKE_CASE_ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
UpperCamelCase : List[Tuple[slice, ...]] = []
UpperCamelCase : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if s == e:
path_list.append(slice(SCREAMING_SNAKE_CASE_ , s + 1 ) )
else:
break
UpperCamelCase : Tuple[slice, ...] = tuple(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = len(SCREAMING_SNAKE_CASE_ )
# start == end, and we're done
if divergence_idx == len(SCREAMING_SNAKE_CASE_ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase : str = start[divergence_idx]
return tuple(
path + (slice(SCREAMING_SNAKE_CASE_ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase : str = end[divergence_idx]
return tuple(
path + (slice(SCREAMING_SNAKE_CASE_ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCamelCase : int = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def a ( SCREAMING_SNAKE_CASE_ : torch.Tensor , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : int = t.shape[:no_batch_dims]
UpperCamelCase : Union[str, Any] = list(_flat_idx_to_idx(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# _get_minimal_slice_set is inclusive
UpperCamelCase : Any = list(_flat_idx_to_idx(flat_end - 1 , SCREAMING_SNAKE_CASE_ ) )
# Get an ordered list of slices to perform
UpperCamelCase : Optional[int] = _get_minimal_slice_set(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Tuple = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def a ( SCREAMING_SNAKE_CASE_ : Callable , SCREAMING_SNAKE_CASE_ : Dict[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Any = None , SCREAMING_SNAKE_CASE_ : bool = False , ):
"""simple docstring"""
if not (len(SCREAMING_SNAKE_CASE_ ) > 0):
raise ValueError('''Must provide at least one input''' )
UpperCamelCase : str = [shape[:no_batch_dims] for shape in _fetch_dims(SCREAMING_SNAKE_CASE_ )]
UpperCamelCase : str = tuple([max(SCREAMING_SNAKE_CASE_ ) for s in zip(*SCREAMING_SNAKE_CASE_ )] )
def _prep_inputs(SCREAMING_SNAKE_CASE_ : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
UpperCamelCase : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
UpperCamelCase : List[str] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
UpperCamelCase : Dict = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
UpperCamelCase : Dict[str, Any] = tensor_tree_map(_prep_inputs , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = None
if _out is not None:
UpperCamelCase : str = tensor_tree_map(lambda SCREAMING_SNAKE_CASE_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
UpperCamelCase : str = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase : Optional[Any] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(SCREAMING_SNAKE_CASE_ : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase : Optional[int] = 0
UpperCamelCase : Tuple = prepped_outputs
for _ in range(SCREAMING_SNAKE_CASE_ ):
# Chunk the input
if not low_mem:
UpperCamelCase : Optional[Any] = _select_chunk
else:
UpperCamelCase : Optional[Any] = partial(
_chunk_slice , flat_start=SCREAMING_SNAKE_CASE_ , flat_end=min(SCREAMING_SNAKE_CASE_ , i + chunk_size ) , no_batch_dims=len(SCREAMING_SNAKE_CASE_ ) , )
UpperCamelCase : Dict[str, Any] = tensor_tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Run the layer on the chunk
UpperCamelCase : Optional[int] = layer(**SCREAMING_SNAKE_CASE_ )
# Allocate space for the output
if out is None:
UpperCamelCase : Any = tensor_tree_map(lambda SCREAMING_SNAKE_CASE_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , SCREAMING_SNAKE_CASE_ )
# Put the chunk in its pre-allocated space
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
def assign(SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : dict ) -> None:
for k, v in da.items():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assign(SCREAMING_SNAKE_CASE_ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCamelCase : Optional[int] = da[k]
assign(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for xa, xa in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCamelCase : Union[str, Any] = xa
elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCamelCase : Any = output_chunk
else:
raise ValueError('''Not supported''' )
i += chunk_size
UpperCamelCase : Tuple = tensor_tree_map(lambda SCREAMING_SNAKE_CASE_ : t.view(orig_batch_dims + t.shape[1:] ) , SCREAMING_SNAKE_CASE_ )
return out
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE = 512 , ):
"""simple docstring"""
UpperCamelCase : Dict = max_chunk_size
UpperCamelCase : Optional[int] = None
UpperCamelCase : Optional[tuple] = None
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
logging.info('''Tuning chunk size...''' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
UpperCamelCase : List[Any] = [c for c in candidates if c > min_chunk_size]
UpperCamelCase : str = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__SCREAMING_SNAKE_CASE ) -> bool:
try:
with torch.no_grad():
fn(*__SCREAMING_SNAKE_CASE , chunk_size=__SCREAMING_SNAKE_CASE )
return True
except RuntimeError:
return False
UpperCamelCase : Optional[int] = 0
UpperCamelCase : List[str] = len(__SCREAMING_SNAKE_CASE ) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase : Optional[int] = test_chunk_size(candidates[i] )
if not viable:
UpperCamelCase : Any = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase : Tuple = i
UpperCamelCase : Optional[int] = (i + len(__SCREAMING_SNAKE_CASE ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = True
for aa, aa in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
assert type(__SCREAMING_SNAKE_CASE ) == type(__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
consistent &= self._compare_arg_caches(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda __SCREAMING_SNAKE_CASE : x[0] )]
UpperCamelCase : Optional[int] = [v for _, v in sorted(aa.items() , key=lambda __SCREAMING_SNAKE_CASE : x[0] )]
consistent &= self._compare_arg_caches(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
consistent &= aa == aa
return consistent
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : List[Any] = True
UpperCamelCase : tuple = tree_map(lambda __SCREAMING_SNAKE_CASE : a.shape if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) else a , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = self._compare_arg_caches(self.cached_arg_data , __SCREAMING_SNAKE_CASE )
else:
            # No cached data yet, so the chunk size has to be tuned from scratch
UpperCamelCase : Optional[Any] = False
if not consistent:
UpperCamelCase : Optional[int] = self._determine_favorable_chunk_size(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
UpperCamelCase : Any = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
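# Illustrative sketch (not part of the original module): the core idea behind
# the chunked-layer helper above, reduced to a toy. Flatten the batch
# dimension, run the layer on fixed-size slices, and stitch the chunks back
# together. The real implementation additionally handles dict/tuple input
# trees, low-memory slicing, and pre-allocated outputs.
if __name__ == "__main__":
    def toy_layer(x: torch.Tensor) -> torch.Tensor:
        return x * 2.0
    t = torch.arange(10.0)
    chunk_size = 4
    out = torch.cat([toy_layer(t[i : i + chunk_size]) for i in range(0, t.shape[0], chunk_size)])
    assert torch.equal(out, toy_layer(t))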
| 643
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : Dict = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
__UpperCAmelCase : Tuple = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
__UpperCAmelCase : Any = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : str = VOCAB_FILES_NAMES
__UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : Any = RoFormerTokenizer
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE ) != strip_accents
):
UpperCamelCase : List[Any] = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
UpperCamelCase : Optional[int] = do_lower_case
UpperCamelCase : Optional[Any] = strip_accents
UpperCamelCase : List[Any] = pre_tok_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = do_lower_case
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.__dict__.copy()
UpperCamelCase : Any = BertPreTokenizer()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = d
UpperCamelCase : List[str] = self.__dict__['''_tokenizer'''].get_vocab()
UpperCamelCase : Any = PreTokenizer.custom(JiebaPreTokenizer(__SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : List[Any] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Any = BertPreTokenizer()
return super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
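# Illustrative sketch (not part of the original module): ``__getstate__`` /
# ``__setstate__`` above swap the pre-tokenizer because ``PreTokenizer.custom``
# wraps a Python object (the jieba-backed ``JiebaPreTokenizer``) and is not
# picklable; a plain ``BertPreTokenizer`` is substituted before pickling and
# the custom one is rebuilt on load. The same pattern on a toy class:
if __name__ == "__main__":
    import pickle
    class Holder:
        def __init__(self):
            self.custom = lambda s: s  # stand-in for the unpicklable pre-tokenizer
        def __getstate__(self):
            state = self.__dict__.copy()
            state["custom"] = None  # swapped out, like BertPreTokenizer above
            return state
        def __setstate__(self, d):
            self.__dict__ = d
            self.custom = lambda s: s  # rebuilt on load
    restored = pickle.loads(pickle.dumps(Holder()))
    assert restored.custom("ok") == "ok"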
| 643
| 1
|
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix filled with 1..row_size**2."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 270 degrees counterclockwise (90 clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    return [list(x) for x in zip(*matrix)]
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    return matrix[::-1]
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    return [x[::-1] for x in matrix]
def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
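    # Sanity check: four successive 90-degree rotations reproduce the input.
    matrix = make_matrix()
    assert rotate_90(rotate_90(rotate_90(rotate_90(matrix)))) == matrix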
| 643
|
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Pigeonhole sort: drop each value into a hole indexed by (value - min),
    then read the holes back in order.
    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    """
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the number of holes needed to cover the value range.
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting: place each value in its hole and count repeats.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Make the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Return the sorted array.
    return array
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
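    # Complexity note: pigeonhole sort runs in O(n + range) time with O(range)
    # extra space, so it only pays off when max(array) - min(array) is small
    # relative to len(array).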
| 643
| 1
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase : Any = "▁"
__UpperCAmelCase : int = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class UpperCAmelCase_ ( _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Any = BertGenerationTokenizer
__UpperCamelCase : int = False
__UpperCamelCase : List[Any] = True
def _lowercase ( self ):
"""simple docstring"""
super().setUp()
UpperCamelCase : Tuple = BertGenerationTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = '''<s>'''
UpperCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 1_002 )
def _lowercase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = BertGenerationTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [285, 46, 10, 170, 382] , )
UpperCamelCase : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCamelCase : Any = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def _lowercase ( self ):
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = '''Hello World!'''
UpperCamelCase : str = [18_536, 2_260, 101]
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
UpperCamelCase : Tuple = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@require_torch
@slow
def _lowercase ( self ):
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
UpperCamelCase : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCamelCase : List[Any] = ''' '''.join(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = self.big_tokenizer.encode_plus(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , return_token_type_ids=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = BertGenerationConfig()
UpperCamelCase : List[str] = BertGenerationEncoder(__SCREAMING_SNAKE_CASE )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__SCREAMING_SNAKE_CASE )
model(**__SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
| 643
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
__UpperCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def a ( args : Namespace ):
"""simple docstring"""
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
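# Note: the CLI entry point calls `args.func(args)`, which resolves to this factory once the
# sub-command registers it below via `add_new_model_parser.set_defaults(func=a)`.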
class UpperCAmelCase_ ( _a):
'''simple docstring'''
@staticmethod
    def _lowercase ( parser ):
        """simple docstring"""
        add_new_model_parser = parser.add_parser('''add-new-model''' )
        add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
        add_new_model_parser.add_argument('''--testing_file''' , type=str , help='''Configuration file on which to run.''' )
        add_new_model_parser.add_argument(
            '''--path''' , type=str , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
        add_new_model_parser.set_defaults(func=a )
    def __init__( self , testing , testing_file , path=None , *args ):
        """simple docstring"""
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
        if len(directories ) > 0:
            raise ValueError(
                '''Several directories starting with `cookiecutter-template-` in current working directory. '''
                '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
                '''change your working directory.''' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , '''r''' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['''lowercase_modelname''']
        generate_tensorflow_pytorch_and_flax = configuration['''generate_tensorflow_pytorch_and_flax''']
        os.remove(f"""{directory}/configuration.json""" )
        output_pytorch = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
        output_flax = '''Flax''' in generate_tensorflow_pytorch_and_flax
        model_dir = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=True )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
        def remove_copy_lines(path ):
            with open(path , '''r''' ) as f:
                lines = f.readlines()
            with open(path , '''w''' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file , line_to_copy_below , lines_to_copy ):
            # Create temp file
            fh , abs_path = mkstemp()
            line_found = False
            with fdopen(fh , '''w''' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
def skip_units(__SCREAMING_SNAKE_CASE ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('''"''' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('''"''' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
        replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
        os.rmdir(directory )
| 643
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = "timesformer"
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="divided_space_time" , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = image_size
UpperCamelCase : str = patch_size
UpperCamelCase : Optional[int] = num_channels
UpperCamelCase : str = num_frames
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : List[str] = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : List[Any] = intermediate_size
UpperCamelCase : int = hidden_act
UpperCamelCase : Optional[Any] = hidden_dropout_prob
UpperCamelCase : Tuple = attention_probs_dropout_prob
UpperCamelCase : int = initializer_range
UpperCamelCase : Any = layer_norm_eps
UpperCamelCase : Union[str, Any] = qkv_bias
UpperCamelCase : Tuple = attention_type
UpperCamelCase : int = drop_path_rate
| 643
|
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray , pt1: np.ndarray , pt2: np.ndarray , rows: int , cols: int ) -> np.ndarray:
    """simple docstring"""
    rotation_matrix = cv2.getAffineTransform(pt1 , pt2 )
    return cv2.warpAffine(img , rotation_matrix , (rows, cols) )
if __name__ == "__main__":
# read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
# turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get image shape
    img_rows, img_cols = gray_img.shape
# set different points to rotate image
    # note: the point pairings below are a reconstruction (the original pairings were ambiguous);
    # consecutive pairs are used, and any affine point pair works for the demonstration.
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
# plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 643
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "conditional_detr"
__UpperCamelCase : Optional[Any] = ["past_key_values"]
__UpperCamelCase : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=300 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="sine" , __SCREAMING_SNAKE_CASE="resnet50" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.25 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
UpperCamelCase : List[str] = use_timm_backbone
        self.backbone_config = backbone_config
UpperCamelCase : Any = num_channels
UpperCamelCase : Optional[Any] = num_queries
        self.d_model = d_model
UpperCamelCase : Optional[Any] = encoder_ffn_dim
UpperCamelCase : Optional[int] = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
UpperCamelCase : Optional[Any] = decoder_ffn_dim
UpperCamelCase : Optional[int] = decoder_layers
UpperCamelCase : Optional[Any] = decoder_attention_heads
UpperCamelCase : Any = dropout
UpperCamelCase : List[Any] = attention_dropout
UpperCamelCase : List[Any] = activation_dropout
UpperCamelCase : List[str] = activation_function
UpperCamelCase : Optional[int] = init_std
UpperCamelCase : Optional[Any] = init_xavier_std
UpperCamelCase : Union[str, Any] = encoder_layerdrop
UpperCamelCase : Optional[Any] = decoder_layerdrop
UpperCamelCase : Tuple = encoder_layers
UpperCamelCase : Optional[Any] = auxiliary_loss
UpperCamelCase : Union[str, Any] = position_embedding_type
UpperCamelCase : Optional[int] = backbone
UpperCamelCase : Dict = use_pretrained_backbone
UpperCamelCase : Tuple = dilation
# Hungarian matcher
UpperCamelCase : Union[str, Any] = class_cost
UpperCamelCase : List[Any] = bbox_cost
UpperCamelCase : Optional[Any] = giou_cost
# Loss coefficients
UpperCamelCase : Optional[Any] = mask_loss_coefficient
UpperCamelCase : Optional[int] = dice_loss_coefficient
UpperCamelCase : Optional[Any] = cls_loss_coefficient
UpperCamelCase : Optional[int] = bbox_loss_coefficient
UpperCamelCase : Optional[int] = giou_loss_coefficient
UpperCamelCase : Optional[int] = focal_alpha
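        # focal_alpha is the alpha weighting of the sigmoid focal loss used for classification;
        # the 0.25 default matches the value used by Conditional DETR.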
super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def _lowercase ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _lowercase ( self ):
"""simple docstring"""
return self.d_model
def _lowercase ( self ):
"""simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
return output
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Dict = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-5
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
| 643
| 0
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def a ( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
"""simple docstring"""
    in_colab = False
    in_kaggle = False
    if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            f"""Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
    if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , None ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
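        # Colab and Kaggle TPU runtimes expose 8 cores, hence the num_processes default below.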
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type='''TPU''' )
        print(f"""Launching a training on {num_processes} TPU cores.""" )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
        function(*args )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes , master_addr='''127.0.0.1''' , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type='''MULTI_GPU''' )
                print(f"""Launching training on {num_processes} GPUs.""" )
try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ['''PYTORCH_ENABLE_MPS_FALLBACK'''] = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
            function(*args )
def a ( function , args=() , num_processes=2 ):
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
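# typical test usage (assumed): a(training_fn, args=(config,), num_processes=2) forks two CPU-only
# workers that rendezvous through the temp file patched into the environment above.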
| 701
|
import requests
from bs4 import BeautifulSoup
def stock_price( symbol : str = "AAPL" ) -> str:
    """simple docstring"""
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text
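# note: this scrapes the Yahoo Finance quote HTML; the CSS class above is brittle and can change upstream.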
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 643
| 0
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image ):
    """simple docstring"""
    warnings.warn(
        '''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w , h = image[0].size
        w , h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask(mask ):
    """simple docstring"""
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w , h = mask[0].size
        w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class UpperCAmelCase_ ( DiffusionPipeline):
    '''simple docstring'''
    unet: UNet2DModel
    scheduler: RePaintScheduler
    def __init__( self , unet , scheduler ):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image , mask_image , num_inference_steps = 250 , eta = 0.0 , jump_length = 10 , jump_n_sample = 10 , generator = None , output_type = "pil" , return_dict = True , ):
        """simple docstring"""
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 702
|
def a ( number : int ) -> str:
    """simple docstring"""
    if number > 0:
        raise ValueError('''input must be a negative integer''' )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
return "0b" + twos_complement_number
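# worked example: a(-5) -> "0b1011" (bin(-5) has 3 magnitude bits; abs(-5) - 2**3 = -3 -> "11",
# which is padded with the leading sign bit to the 4-bit two's complement 1011).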
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
| 0
|
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase_ ( PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    __UpperCamelCase : Optional[int] = IFImg2ImgSuperResolutionPipeline
__UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
__UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
__UpperCamelCase : Any = PipelineTesterMixin.required_optional_params - {"latents"}
def _lowercase ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
    def _lowercase ( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _lowercase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _lowercase ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def _lowercase ( self ):
"""simple docstring"""
        super().test_save_load_float16(expected_max_diff=1e-1 )
def _lowercase ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _lowercase ( self ):
"""simple docstring"""
self._test_save_load_local()
def _lowercase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "yolos"
def __init__( self , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=[512, 864] , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : int = num_attention_heads
UpperCamelCase : Dict = intermediate_size
UpperCamelCase : Dict = hidden_act
UpperCamelCase : int = hidden_dropout_prob
UpperCamelCase : Any = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : int = image_size
UpperCamelCase : Any = patch_size
UpperCamelCase : str = num_channels
UpperCamelCase : str = qkv_bias
UpperCamelCase : Tuple = num_detection_tokens
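        # YOLOS appends `num_detection_tokens` learnable [DET] tokens to the patch sequence;
        # each one is decoded into a single bounding box and class prediction.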
UpperCamelCase : List[Any] = use_mid_position_embeddings
UpperCamelCase : Dict = auxiliary_loss
# Hungarian matcher
UpperCamelCase : Optional[Any] = class_cost
UpperCamelCase : Union[str, Any] = bbox_cost
UpperCamelCase : Any = giou_cost
# Loss coefficients
UpperCamelCase : List[Any] = bbox_loss_coefficient
UpperCamelCase : Union[str, Any] = giou_loss_coefficient
UpperCamelCase : Dict = eos_coefficient
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-4
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
| 643
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_informer''': [
        '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''InformerConfig''',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_informer'''] = [
        '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''InformerForPrediction''',
        '''InformerModel''',
        '''InformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704
|
def a ( a : int , b : int ) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
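# worked example: a(25, 32) -> "0b111001" (0b011001 | 0b100000 after zero-filling both to 6 bits).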
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
| 0
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
def _lowercase ( self ):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        # is_decoder=False is an assumption; the original flag value was lost in the source.
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
def _lowercase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( FlaxModelTesterMixin, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowercase ( self ):
"""simple docstring"""
        self.model_tester = FlaxAlbertModelTester(self )
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''albert-base-v2''' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@slow
def _lowercase ( self ):
"""simple docstring"""
        model = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 705
|
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size , sigma ):
    """simple docstring"""
    center = k_size // 2
    x , y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
    return g
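# note: the 1/(2*pi*sigma) factor above follows the source; the continuous 2D Gaussian uses
# 1/(2*pi*sigma**2). The constant only rescales output intensities, since the kernel is not
# re-normalized to sum to 1 here.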
def gaussian_filter(image , k_size , sigma ):
    """simple docstring"""
    height , width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size) )
    row = 0
    for i, j in product(range(dst_height ) , range(dst_width ) ):
        window = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size , sigma )
    filter_array = ravel(gaussian_kernel )
    # reshape and get the dst image
    dst = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uint8 )
    return dst
if __name__ == "__main__":
# read original image
    img = imread(r"../image_data/lena.jpg")
# turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
# show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
waitKey()
| 643
| 0
|
'''simple docstring'''
import math
def res ( x : int , y : int ):
    """simple docstring"""
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('''This should never happen''' )
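# worked example: res(2, 10) = 10 * log10(2) ≈ 3.01, i.e. 2**10 = 1024 ≈ 10**3.01.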
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 706
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def a ( main_process_only : bool = True , *args , **kwargs ):
    """simple docstring"""
    if not is_tqdm_available():
        raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
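# assumed usage: `for batch in a(True, dataloader): ...` renders the bar only on the local main process.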
| 643
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Optional[int] = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Any = 'transfo-xl'
__UpperCamelCase : int = ['mems']
__UpperCamelCase : Tuple = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __SCREAMING_SNAKE_CASE=267_735 , __SCREAMING_SNAKE_CASE=[20_000, 40_000, 200_000] , __SCREAMING_SNAKE_CASE=1_024 , __SCREAMING_SNAKE_CASE=1_024 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=4_096 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=1_600 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=-1 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="normal" , __SCREAMING_SNAKE_CASE=0.01 , __SCREAMING_SNAKE_CASE=0.01 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-5 , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(__SCREAMING_SNAKE_CASE )
if proj_share_all_but_first:
UpperCamelCase : List[Any] = [False] + [True] * len(self.cutoffs )
else:
UpperCamelCase : List[str] = [False] + [False] * len(self.cutoffs )
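        # tie_projs: with proj_share_all_but_first, the adaptive-softmax projections are shared for
        # every cluster except the first (head) cluster.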
UpperCamelCase : str = d_model
UpperCamelCase : int = d_embed
UpperCamelCase : Dict = d_head
UpperCamelCase : Optional[int] = d_inner
UpperCamelCase : int = div_val
UpperCamelCase : List[str] = pre_lnorm
UpperCamelCase : Any = n_layer
UpperCamelCase : Dict = n_head
UpperCamelCase : Tuple = mem_len
UpperCamelCase : Optional[int] = same_length
UpperCamelCase : List[str] = attn_type
UpperCamelCase : Optional[int] = clamp_len
UpperCamelCase : int = sample_softmax
UpperCamelCase : List[str] = adaptive
UpperCamelCase : List[str] = dropout
UpperCamelCase : Union[str, Any] = dropatt
UpperCamelCase : int = untie_r
UpperCamelCase : Optional[Any] = init
UpperCamelCase : Union[str, Any] = init_range
UpperCamelCase : Any = proj_init_std
UpperCamelCase : List[Any] = init_std
UpperCamelCase : Optional[Any] = layer_norm_epsilon
        super().__init__(eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def _lowercase ( self ):
"""simple docstring"""
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 707
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : int = "▁"
__UpperCAmelCase : Tuple = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
__UpperCAmelCase : Dict = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
__UpperCAmelCase : Dict = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
__UpperCAmelCase : str = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
__UpperCAmelCase : Optional[int] = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = ["input_ids"]
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[str] = RESOURCE_FILES_NAMES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="utf8" , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , vocab_file=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(sentencepiece_model_ckpt )
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file )
        else:
            self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def _lowercase ( self , text ):
        """simple docstring"""
        if text is None:
            return None
        split_tokens = self.tokenize(text )
        normalized_text , char_mapping = '''''', []
        for i, ch in enumerate(text ):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch )
            else:
                ch = unicodedata.normalize('''NFKC''' , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )
        text , token_mapping , offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token ) + offset
            end = start + len(token )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            offset = end
        return token_mapping
@property
def _lowercase ( self ):
"""simple docstring"""
return len(self.vocab )
def _lowercase ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.__dict__.copy()
UpperCamelCase : str = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase : Optional[int] = {}
UpperCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
    def _lowercase ( self , text ):
        """simple docstring"""
        return "".join((self.SP_CHAR_MAPPING.get(c , c ) for c in text) )
    def _lowercase ( self , text , enable_sampling=False , nbest_size=64 , alpha=0.1 ):
        """simple docstring"""
        if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('''alpha''' ) is not None:
            alpha = self.sp_model_kwargs.get('''alpha''' )
        if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
            nbest_size = self.sp_model_kwargs.get('''nbest_size''' )
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text )
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text , nbest_size , alpha )
        new_pieces = []
        for pi, piece in enumerate(pieces ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE )
                    continue
                else:
                    continue
            lst_i = 0
            # split each sentencepiece piece further on CJK characters, punctuation, and digit boundaries
            for i, chunk in enumerate(piece ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk ) or self.is_punct(chunk ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(chunk )
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
            if len(piece ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces
    def _lowercase ( self , tokens ):
        """simple docstring"""
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def _lowercase ( self , ids ):
        """simple docstring"""
        tokens = self.convert_ids_to_tokens(ids )
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.vocab.get(__SCREAMING_SNAKE_CASE , self.vocab.get(self.unk_token ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.reverse_vocab.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase : Any = [self.cls_token_id]
UpperCamelCase : str = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__SCREAMING_SNAKE_CASE ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__SCREAMING_SNAKE_CASE ) + 1) + [1] * (len(__SCREAMING_SNAKE_CASE ) + 3)
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__SCREAMING_SNAKE_CASE ) == 1:
UpperCamelCase : Optional[int] = unicodedata.category(__SCREAMING_SNAKE_CASE )
if cat == "Zs":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = {}
with io.open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = line.rstrip('''\n''' )
UpperCamelCase : List[Any] = int(__SCREAMING_SNAKE_CASE )
return token_to_idx
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = 0
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Dict = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCamelCase : Union[str, Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda __SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCamelCase : List[Any] = token_index
writer.write(token + '''\n''' )
index += 1
UpperCamelCase : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , '''sentencepiece.bpe.model''' )
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (vocab_file,)
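# Illustrative sketch (not part of the original file): the pair layout built by
# build_inputs_with_special_tokens above is [CLS] A [SEP] [SEP] B [SEP], and the
# matching token_type_ids mark the first len(A) + 1 positions as segment 0 and
# the remaining len(B) + 3 as segment 1. The ids below are hypothetical.
if __name__ == "__main__":
    cls_id, sep_id = 0, 2
    a, b = [11, 12], [21]
    input_ids = [cls_id] + a + [sep_id, sep_id] + b + [sep_id]
    token_type_ids = [0] * (len(a ) + 1) + [1] * (len(b ) + 3)
    assert len(input_ids ) == len(token_type_ids )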
| 643
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : List[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig( PretrainedConfig):
    '''simple docstring'''
    model_type = "xmod"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
class XmodOnnxConfig( OnnxConfig):
    '''simple docstring'''
    @property
    def inputs( self ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
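# Hedged usage sketch (not part of the original file): instantiating the
# configuration above with a custom adapter language list; argument names
# follow the constructor signature.
if __name__ == "__main__":
    config = XmodConfig(languages=("en_XX", "de_DE") , default_language="en_XX" )
    assert config.model_type == "xmod"
    assert config.languages == ["en_XX", "de_DE"]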
| 708
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule( nn.Module):
    '''simple docstring'''
    def __init__( self , in_channels , out_channels , kernel_size , padding = 0 , bias = False , dilation = 1 , ):
        """simple docstring"""
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , padding=padding , bias=bias , dilation=dilation , )
        self.batch_norm = nn.BatchNorm2d(out_channels )
        self.activation = nn.ReLU()
    def forward( self , input ):
        """simple docstring"""
        output = self.conv(input )
        output = self.batch_norm(output )
        output = self.activation(output )
        return output
class UperNetPyramidPoolingBlock( nn.Module):
    '''simple docstring'''
    def __init__( self , pool_scale , in_channels , channels ):
        """simple docstring"""
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale ),
            UperNetConvModule(in_channels , channels , kernel_size=1 ),
        ]
        for i, layer in enumerate(self.layers ):
            self.add_module(str(i ) , layer )
    def forward( self , input ):
        """simple docstring"""
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class UperNetPyramidPoolingModule( nn.Module):
    '''simple docstring'''
    def __init__( self , pool_scales , in_channels , channels , align_corners ):
        """simple docstring"""
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales ):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale , in_channels=in_channels , channels=channels )
            self.blocks.append(block )
            self.add_module(str(i ) , block )
    def forward( self , x ):
        """simple docstring"""
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x )
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
            ppm_outs.append(upsampled_ppm_out )
        return ppm_outs
class UperNetHead( nn.Module):
    '''simple docstring'''
    def __init__( self , config , in_channels ):
        """simple docstring"""
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
            fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(l_conv )
            self.fpn_convs.append(fpn_conv )
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    def init_weights( self ):
        """simple docstring"""
        self.apply(self._init_weights )
    def _init_weights( self , module ):
        """simple docstring"""
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward( self , inputs ):
        """simple docstring"""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x ) )
        psp_outs = torch.cat(psp_outs , dim=1 )
        output = self.bottleneck(psp_outs )
        return output
    def forward( self , encoder_hidden_states ):
        """simple docstring"""
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(encoder_hidden_states ) )
        # build top-down path
        used_backbone_levels = len(laterals )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=prev_shape , mode='''bilinear''' , align_corners=self.align_corners )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
        fpn_outs = torch.cat(fpn_outs , dim=1 )
        output = self.fpn_bottleneck(fpn_outs )
        output = self.classifier(output )
        return output
class UperNetFCNHead( nn.Module):
    '''simple docstring'''
    def __init__( self , config , in_index = 2 , kernel_size = 3 , dilation = 1 ):
        """simple docstring"""
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs )
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
    def init_weights( self ):
        """simple docstring"""
        self.apply(self._init_weights )
    def _init_weights( self , module ):
        """simple docstring"""
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def forward( self , encoder_hidden_states ):
        """simple docstring"""
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states )
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        output = self.classifier(output )
        return output
class UperNetPreTrainedModel( PreTrainedModel):
    '''simple docstring'''
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        """simple docstring"""
        if isinstance(module , UperNetPreTrainedModel ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    def init_weights( self ):
        """simple docstring"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    def _set_gradient_checkpointing( self , module , value=False ):
        """simple docstring"""
        if isinstance(module , BackboneMixin ):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UPERNET_INPUTS_DOCSTRING = r"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , UPERNET_START_DOCSTRING , )
class UperNetForSemanticSegmentation( UperNetPreTrainedModel):
    '''simple docstring'''
    def __init__( self , config ):
        """simple docstring"""
        super().__init__(config )
        self.backbone = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
        self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values = None , output_attentions = None , output_hidden_states = None , labels = None , return_dict = None , ):
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps
        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('''The number of labels should be greater than one''' )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
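# Minimal self-contained sketch (not part of the original file) of the pyramid
# pooling idea used by UperNetPyramidPoolingModule above: pool a feature map to
# several scales, project each with a 1x1 convolution, and upsample back to the
# input resolution. Channel sizes are illustrative.
if __name__ == "__main__":
    feature = torch.randn(1 , 64 , 32 , 32 )
    outs = [feature]
    for scale in (1, 2, 3, 6):
        pooled = nn.AdaptiveAvgPool2d(scale )(feature )
        projected = nn.Conv2d(64 , 16 , kernel_size=1 )(pooled )
        outs.append(nn.functional.interpolate(projected , size=feature.shape[2:] , mode='''bilinear''' , align_corners=False ) )
    fused = torch.cat(outs , dim=1 )
    assert fused.shape == (1, 64 + 4 * 16, 32, 32)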
| 643
| 0
|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_determinstic():
    """simple docstring"""
    no_aggregation = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=['''rouge2''', '''rougeL'''] )
    assert isinstance(no_aggregation , defaultdict )
    no_aggregation_just_ra = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=['''rouge2'''] )
    assert (
        pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    """simple docstring"""
    k = '''rougeLsum'''
    score = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=[k] )[k]
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=[k] )[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    """simple docstring"""
    k = ['''rouge1''', '''rouge2''', '''rougeL''']
    score_sep = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=k )
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=k )
    assert score_sep == score_no_sep
def a ( ):
"""simple docstring"""
    pred = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .''',
]
    tgt = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
    assert calculate_rouge(pred , tgt , newline_sep=True ) == calculate_rouge(pred , tgt , newline_sep=False )
def test_pegasus_newline():
    """simple docstring"""
    pred = [
'''\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" '''
]
    tgt = [
''' Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
    prev_score = calculate_rouge(pred , tgt , rouge_keys=['''rougeLsum'''] , newline_sep=False )['''rougeLsum''']
    new_score = calculate_rouge(pred , tgt , rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
    assert new_score > prev_score
def test_rouge_cli():
    """simple docstring"""
    data_dir = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
    metrics = calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) )
    assert isinstance(metrics , dict )
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=False )
    assert isinstance(metrics_default_dict , defaultdict )
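# Hedged usage sketch (not part of the original tests): the call pattern the
# tests above exercise, with toy strings in place of the CNN/DailyMail examples.
if __name__ == "__main__":
    scores = calculate_rouge(['''the cat sat on the mat .'''] , ['''a cat was on the mat .'''] , rouge_keys=['''rouge1''', '''rougeL'''] )
    print(scores )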
| 709
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset , **kwargs ):
    """simple docstring"""
    _ = dataset.map(**kwargs )
@get_duration
def filter(dataset: datasets.Dataset , **kwargs ):
    """simple docstring"""
    _ = dataset.filter(**kwargs )
def benchmark_map_filter():
    """simple docstring"""
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples['''text'''] )
        times['''map identity'''] = map(dataset )
        times['''map identity batched'''] = map(dataset , batched=True )
        times['''map no-op batched'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''numpy''' ):
            times['''map no-op batched numpy'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''pandas''' ):
            times['''map no-op batched pandas'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
            times['''map no-op batched pytorch'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
            times['''map no-op batched tensorflow'''] = map(dataset , function=lambda x : None , batched=True )
        times['''map fast-tokenizer batched'''] = map(dataset , function=tokenize , batched=True )
        times['''filter'''] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
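# For reference, a minimal sketch of a duration-measuring decorator in the
# spirit of the get_duration helper imported above (the real implementation in
# utils may differ):
#
#     import functools, time
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             starttime = time.time()
#             func(*args, **kwargs)
#             return time.time() - starttime
#         return wrapper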
| 643
| 0
|
import math
class Graph:
    '''simple docstring'''
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        """simple docstring"""
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ):
        """simple docstring"""
        self.dp[u][v] = w
    def floyd_warshall( self ):
        """simple docstring"""
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        """simple docstring"""
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
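    # Quick checks implied by the edges above: the cheapest 1 -> 4 route is
    # 1 -> 3 -> 4 (5 + 6 = 11) and the cheapest 0 -> 3 route is 0 -> 2 -> 3
    # (9 + 7 = 16).
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16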
| 710
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
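# Hedged usage sketch (not part of this __init__): a typical text-to-3D call
# with the pipeline exported above, following the public diffusers examples;
# the model id and arguments are assumptions.
#
#     import torch
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")
#     images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64, frame_size=256).images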
| 643
| 0
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class UpperCAmelCase_ ( unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer( self ):
"""simple docstring"""
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
        tokens = tokenizer(**self.metas )["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer( self ):
"""simple docstring"""
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
        tokens = tokenizer(**self.metas )["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
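# Hedged usage sketch (not part of the original test): the call pattern the
# tests above exercise; the tokenizer takes artist/genres/lyrics metadata and
# returns one id tensor per prior level, so both tests expect three tensors.
#
#     tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#     tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]
#     assert len(tokens) == 3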
| 711
|
import torch
from transformers import AutoModel
class UpperCAmelCase_ ( torch.nn.Module):
'''simple docstring'''
    def __init__( self , pretrained_model_name_or_path="sayef/fsner-bert-base-uncased" ):
        """simple docstring"""
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path , return_dict=True )
        self.cos = torch.nn.CosineSimilarity(3 , 1e-08 )
        self.softmax = torch.nn.Softmax(dim=1 )
    def BERT( self , **inputs ):
        """simple docstring"""
        return self.bert(**inputs ).last_hidden_state
    def VectorSum( self , token_embeddings ):
        """simple docstring"""
        return token_embeddings.sum(2 , keepdim=True )
    def Atten( self , q , S , T=1 ):
        """simple docstring"""
        return self.softmax(T * self.cos(q , S ) )
    def forward( self , W_query , W_supports ):
        """simple docstring"""
        support_sizes = W_supports['''sizes'''].tolist()
        start_token_id = W_supports['''start_token_id'''].item()
        end_token_id = W_supports['''end_token_id'''].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['''input_ids'''] == start_token_id
        end_token_masks = W_supports['''input_ids'''] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
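# Minimal self-contained sketch (not part of the original file) of the
# temperature-scaled cosine-similarity softmax implemented by Atten above;
# shapes are illustrative.
if __name__ == "__main__":
    cos = torch.nn.CosineSimilarity(3 , 1e-08 )
    softmax = torch.nn.Softmax(dim=1 )
    q = torch.randn(2 , 4 , 5 , 8 )
    S = torch.randn(2 , 4 , 5 , 8 )
    probs = softmax(1.0 * cos(q , S ) )
    assert probs.shape == (2, 4, 5)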
| 643
| 0
|
from math import sqrt
def is_prime(number: int ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10_001 ) -> int:
    """simple docstring"""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
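    # Quick sanity check of the repaired solution above: the 6th prime is 13,
    # the classic Project Euler 7 example.
    assert solution(6 ) == 13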
| 712
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
def _lowercase ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_token_type_ids( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''' , '''World''' )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , expected_token_type_ids )
@slow
    def test_sequence_builders( self ):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration( self ):
        """simple docstring"""
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
            sequences = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding['''input_ids''']]
            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data , expected_encoding )
            for expected, decoded in zip(expected_decoded_sequences , decoded_sequences ):
                self.assertEqual(expected , decoded )
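# Side note in code form (not part of the original test): the \u0120 prefix in
# the toy vocab above is the GPT-2-style byte-level BPE marker "Ġ", which
# stands for a leading space on the following word.
if __name__ == "__main__":
    assert "\u0120" == "Ġ"
    assert "\u0120low"[1:] == "low"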
| 643
| 0
|
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int , remainder: int , digits: list , length: int ) -> int:
    """simple docstring"""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , digits , length )
        return result
    result = 0
    for digit1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digit1 + digit2) // 10 , digits , length , )
    return result
def solution(max_power: int = 9 ) -> int:
    """simple docstring"""
    result = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
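    # Quick check of the repaired solution above against the Project Euler 145
    # statement: there are 120 reversible numbers below one thousand.
    assert solution(3 ) == 120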
| 713
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase_ ( TestCase):
    '''simple docstring'''
    def _no_encoding_on_file_open( self , filepath ):
        """simple docstring"""
        with open(filepath , encoding='''utf-8''' ) as input_file:
            regexp = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match
    def _no_print_statements( self , filepath ):
        """simple docstring"""
        with open(filepath , encoding='''utf-8''' ) as input_file:
            regexp = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open( self ):
        """simple docstring"""
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
    def test_no_print_statements( self ):
        """simple docstring"""
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 643
| 0
|
import random
def rabin_miller(num: int ) -> bool:
    """simple docstring"""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int ) -> bool:
    """simple docstring"""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(lowerCAmelCase__ )
def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] = 1_0_2_4 ):
"""simple docstring"""
while True:
UpperCamelCase : str = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(lowerCAmelCase__ ):
return num
if __name__ == "__main__":
__UpperCAmelCase : int = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 714
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True,
        vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37,
        activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1,
        max_position_embeddings=512, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
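# --- usage sketch (added, not part of the test file): the greedy-decoding API the
# integration tests above exercise; needs TF plus network access for the weights.
if __name__ == "__main__":
    tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    lm = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
    ids = tok("The dog", return_tensors="tf").input_ids
    print(tok.decode(lm.generate(ids, do_sample=False, max_new_tokens=10)[0], skip_special_tokens=True))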
| 643
| 0
|
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value the current player can force in a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
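# --- usage sketch (added): on a depth-1 tree the maximiser picks the larger
# leaf and the minimiser the smaller one.
if __name__ == "__main__":
    assert minimax(0, 0, True, [3, 7], math.log(2, 2)) == 7
    assert minimax(0, 0, False, [3, 7], math.log(2, 2)) == 3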
| 715
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
__UpperCAmelCase : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None,
        tgt_lang=None, additional_special_tokens=None, **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target-language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
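# --- usage sketch (added): source-language batch preparation with the released
# en->ro checkpoint; the encoded sequence ends with </s> followed by the en_XX code.
if __name__ == "__main__":
    tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    ids = tok("UN Chief Says There Is No Plan to Stop War in Syria")["input_ids"]
    print(ids[-2:])  # [eos_token_id, lang_code_id for en_XX]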
| 643
| 0
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
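# --- usage sketch (added): the released Vietnamese checkpoint applies the same
# BPE scheme the test above checks; requires network access.
if __name__ == "__main__":
    tok = PhobertTokenizer.from_pretrained("vinai/phobert-base")
    print(tok.tokenize("Tôi là sinh_viên"))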
| 716
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCAmelCase : Dict = False
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCamelCase : str = torch.manual_seed(0 )
UpperCamelCase : Union[str, Any] = pipe(
image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCamelCase : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : Dict = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 643
| 0
|
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under shortest-remaining-time-first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            waiting_time[short] = finish_time - arrival_time[short] - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn-around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting time and the average turn-around time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
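# --- usage sketch (added): a fixed, non-interactive example. For arrivals
# [0, 1, 2] and bursts [3, 1, 2], SRTF preempts P1 to run the shorter P2 first.
if __name__ == "__main__":
    wt = calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)
    tat = calculate_turnaroundtime([3, 1, 2], 3, wt)
    calculate_average_times(wt, tat, 3)  # expected waiting times: [1, 0, 2]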
| 717
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30, n_fft=400,
        padding_value=0.0, return_attention_mask=False, **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self, raw_speech, truncation=True, pad_to_multiple_of=None, return_tensors=None,
        return_attention_mask=None, padding="max_length", max_length=None, sampling_rate=None,
        do_normalize=None, **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
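# --- usage sketch (added): log-mel features for one second of silence; with the
# defaults above the output is padded to 30 s, i.e. shape (1, 80, 3000).
if __name__ == "__main__":
    fe = WhisperFeatureExtractor()
    feats = fe(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np")
    print(feats["input_features"].shape)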
| 643
| 0
|
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
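# --- usage sketch (added, not part of the upstream module): a quick shape check
# for the plain down block; assumes diffusers' flax building blocks are importable.
if __name__ == "__main__":
    import jax

    block = FlaxDownBlock2D(in_channels=32, out_channels=32, num_layers=1)
    sample = jnp.zeros((1, 8, 8, 32))  # NHWC layout, as used by the flax UNet
    temb = jnp.zeros((1, 128))  # time embedding
    params = block.init(jax.random.PRNGKey(0), sample, temb)
    out, skip_states = block.apply(params, sample, temb)
    assert out.shape == (1, 4, 4, 32)  # halved spatially because add_downsample=True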
| 718
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True,
        strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
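# --- usage sketch (added): tokenizing Chinese text with the released checkpoint;
# the custom jieba pre-tokenizer needs the `rjieba` package installed.
if __name__ == "__main__":
    tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    print(tok.tokenize("今天天气非常好。"))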
| 643
| 0
|
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path, encoding="utf-8").readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path, encoding="utf-8").readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
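# --- usage sketch (added; file names are illustrative):
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json
# `fire` maps the positional CLI arguments onto calculate_rouge_path(pred_path, tgt_path, ...).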
| 719
|
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    """
    Implementation of pigeonhole sort.
    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
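# --- usage sketch (added): pigeonhole sort runs in O(n + range) time and memory,
# so it pays off when the value range is small relative to n.
if __name__ == "__main__":
    assert pigeon_sort([8, 3, 2, 7, 4]) == [2, 3, 4, 7, 8]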
| 643
| 0
|
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12,
        relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1,
        layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new",
        decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1,
        tie_word_embeddings=False, is_decoder=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12,
        num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0,
        attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096,
        relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02,
        is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
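# --- usage sketch (added): composing a default config; the composite config
# propagates its initializer_range into both sub-configs.
if __name__ == "__main__":
    config = Pix2StructConfig()
    print(config.text_config.num_layers, config.vision_config.num_hidden_layers)  # 12 12
    print(config.text_config.initializer_range == config.initializer_range)  # True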
| 720
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCamelCase : List[str] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , '''r''' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['''lowercase_modelname''']
        generate_tensorflow_pytorch_and_flax = configuration['''generate_tensorflow_pytorch_and_flax''']
        os.remove(f"""{directory}/configuration.json""" )
        output_pytorch = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
        output_flax = '''Flax''' in generate_tensorflow_pytorch_and_flax
        model_dir = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=True )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
        def remove_copy_lines(path ):
            with open(path , '''r''' ) as f:
                lines = f.readlines()
            with open(path , '''w''' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file , line_to_copy_below , lines_to_copy ):
            # Create temp file
            fh , abs_path = mkstemp()
            line_found = False
            with fdopen(fh , '''w''' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('''"''' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('''"''' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
        replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
        os.rmdir(directory )
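        # Illustrative sketch (assumed content) of the `to_replace_*.py` file parsed by
        # `replace_in_files` above; snippets are delimited by the marker comments the
        # parser looks for, and each snippet is copied below the quoted anchor line:
        #
        #   # To replace in: "src/transformers/models/auto/configuration_auto.py"
        #   # Below: "CONFIG_MAPPING_NAMES = OrderedDict(" if generating PyTorch
        #   # Replace with:
        #           ("brand_new_bert", "BrandNewBertConfig"),
        #   # End.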
| 643
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
__UpperCAmelCase : Optional[int] = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class UpperCAmelCase_ ( PretrainedConfig):
'''simple docstring'''
__UpperCamelCase : Any = "blenderbot-small"
__UpperCamelCase : int = ["past_key_values"]
__UpperCamelCase : Union[str, Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , __SCREAMING_SNAKE_CASE=50_265 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=2 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Optional[int] = vocab_size
UpperCamelCase : Dict = max_position_embeddings
UpperCamelCase : Union[str, Any] = d_model
UpperCamelCase : List[str] = encoder_ffn_dim
UpperCamelCase : Any = encoder_layers
UpperCamelCase : Optional[Any] = encoder_attention_heads
UpperCamelCase : int = decoder_ffn_dim
UpperCamelCase : List[str] = decoder_layers
UpperCamelCase : str = decoder_attention_heads
UpperCamelCase : str = dropout
UpperCamelCase : List[str] = attention_dropout
UpperCamelCase : Any = activation_dropout
UpperCamelCase : Optional[int] = activation_function
UpperCamelCase : int = init_std
UpperCamelCase : List[str] = encoder_layerdrop
UpperCamelCase : Dict = decoder_layerdrop
UpperCamelCase : List[Any] = use_cache
UpperCamelCase : Optional[Any] = encoder_layers
UpperCamelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , forced_eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
class UpperCAmelCase_ ( OnnxSeqaSeqConfigWithPast):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase : Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
UpperCamelCase : int = {0: """batch"""}
UpperCamelCase : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
UpperCamelCase : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
UpperCamelCase : Optional[Any] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowercase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCamelCase : Dict = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
UpperCamelCase : int = self.num_layers
for i in range(_lowercase ):
UpperCamelCase : Any = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCamelCase : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
UpperCamelCase : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase : Any = super().outputs
else:
UpperCamelCase : Optional[int] = super(_lowercase , self ).outputs
if self.use_past:
UpperCamelCase : str = self.num_layers
for i in range(_lowercase ):
UpperCamelCase : str = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCamelCase : Dict = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
UpperCamelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Generate decoder inputs
UpperCamelCase : List[str] = seq_length if not self.use_past else 1
UpperCamelCase : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
UpperCamelCase : Optional[Any] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
UpperCamelCase : Any = dict(**_lowercase , **_lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCamelCase : Optional[int] = common_inputs["""input_ids"""].shape
UpperCamelCase : Optional[int] = common_inputs["""decoder_input_ids"""].shape[1]
UpperCamelCase : Optional[Any] = self.num_attention_heads
UpperCamelCase : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase : Tuple = decoder_seq_length + 3
UpperCamelCase : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCamelCase : Optional[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_lowercase , _lowercase )] , dim=1 )
UpperCamelCase : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCamelCase : List[str] = self.num_layers
UpperCamelCase : List[str] = min(_lowercase , _lowercase )
UpperCamelCase : Optional[Any] = max(_lowercase , _lowercase ) - min_num_layers
UpperCamelCase : Optional[Any] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
) )
# TODO: test this.
UpperCamelCase : Optional[Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_lowercase , _lowercase ):
common_inputs["past_key_values"].append((torch.zeros(_lowercase ), torch.zeros(_lowercase )) )
return common_inputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCamelCase : Any = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
UpperCamelCase : List[Any] = seqlen + 2
UpperCamelCase : Any = self.num_layers
UpperCamelCase : Any = self.num_attention_heads
UpperCamelCase : List[str] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase : str = common_inputs["""attention_mask"""].dtype
UpperCamelCase : Tuple = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
UpperCamelCase : int = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(_lowercase )
]
return common_inputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
UpperCamelCase : Any = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase : Any = tokenizer.num_special_tokens_to_add(_lowercase )
UpperCamelCase : Dict = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowercase )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase : Dict = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCamelCase : List[Any] = dict(tokenizer(_lowercase , return_tensors=_lowercase ) )
return common_inputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase : str = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
elif self.task == "causal-lm":
UpperCamelCase : int = self._generate_dummy_inputs_for_causal_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
else:
UpperCamelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
return common_inputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase : Union[str, Any] = super()._flatten_past_key_values_(_lowercase , _lowercase , _lowercase , _lowercase )
else:
UpperCamelCase : int = super(_lowercase , self )._flatten_past_key_values_(
_lowercase , _lowercase , _lowercase , _lowercase )
| 721
|
from pathlib import Path
import cv2 as cva  # the module is referred to as `cva` throughout this file
import numpy as np
from matplotlib import pyplot as plt
def get_rotation ( img : np.ndarray , pt1 : np.ndarray , pt2 : np.ndarray , rows : int , cols : int ):
    """simple docstring"""
    matrix = cva.getAffineTransform(pt1 , pt2 )
    return cva.warpAffine(img , matrix , (rows, cols) )
if __name__ == "__main__":
# read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
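    # Reading aid, not part of the upstream script: `getAffineTransform` maps one
    # float32 point triplet onto another and returns the 2x3 matrix M with
    # dst(x, y) = src(M @ [x, y, 1]); `warpAffine` then resamples the image onto a
    # canvas whose size is given by the last argument.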
| 643
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : List[str] = logging.get_logger(__name__)
__UpperCAmelCase : int = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class UpperCAmelCase_ ( PretrainedConfig):
'''simple docstring'''
__UpperCamelCase : List[str] = '''mobilenet_v1'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE="relu6" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.999 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.001 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
        super().__init__(**__SCREAMING_SNAKE_CASE )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
UpperCamelCase : Optional[int] = num_channels
UpperCamelCase : str = image_size
UpperCamelCase : str = depth_multiplier
UpperCamelCase : str = min_depth
UpperCamelCase : str = hidden_act
UpperCamelCase : Tuple = tf_padding
UpperCamelCase : Tuple = classifier_dropout_prob
UpperCamelCase : int = initializer_range
UpperCamelCase : Any = layer_norm_eps
class UpperCAmelCase_ ( OnnxConfig):
'''simple docstring'''
__UpperCamelCase : Any = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-4
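# Reading aid (assumed semantics; the property names are mangled in this dump):
# the OrderedDicts above declare the ONNX input/output names with their dynamic
# batch axis, and 1e-4 is the absolute tolerance used when validating the
# exported graph against the original model.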
| 700
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( PretrainedConfig):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "conditional_detr"
__UpperCamelCase : Optional[Any] = ["past_key_values"]
__UpperCamelCase : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=300 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="sine" , __SCREAMING_SNAKE_CASE="resnet50" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.25 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCamelCase : str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
UpperCamelCase : List[str] = use_timm_backbone
UpperCamelCase : int = backbone_config
UpperCamelCase : Any = num_channels
UpperCamelCase : Optional[Any] = num_queries
UpperCamelCase : Tuple = d_model
UpperCamelCase : Optional[Any] = encoder_ffn_dim
UpperCamelCase : Optional[int] = encoder_layers
UpperCamelCase : Union[str, Any] = encoder_attention_heads
UpperCamelCase : Optional[Any] = decoder_ffn_dim
UpperCamelCase : Optional[int] = decoder_layers
UpperCamelCase : Optional[Any] = decoder_attention_heads
UpperCamelCase : Any = dropout
UpperCamelCase : List[Any] = attention_dropout
UpperCamelCase : List[Any] = activation_dropout
UpperCamelCase : List[str] = activation_function
UpperCamelCase : Optional[int] = init_std
UpperCamelCase : Optional[Any] = init_xavier_std
UpperCamelCase : Union[str, Any] = encoder_layerdrop
UpperCamelCase : Optional[Any] = decoder_layerdrop
UpperCamelCase : Tuple = encoder_layers
UpperCamelCase : Optional[Any] = auxiliary_loss
UpperCamelCase : Union[str, Any] = position_embedding_type
UpperCamelCase : Optional[int] = backbone
UpperCamelCase : Dict = use_pretrained_backbone
UpperCamelCase : Tuple = dilation
# Hungarian matcher
UpperCamelCase : Union[str, Any] = class_cost
UpperCamelCase : List[Any] = bbox_cost
UpperCamelCase : Optional[Any] = giou_cost
# Loss coefficients
UpperCamelCase : Optional[Any] = mask_loss_coefficient
UpperCamelCase : Optional[int] = dice_loss_coefficient
UpperCamelCase : Optional[Any] = cls_loss_coefficient
UpperCamelCase : Optional[int] = bbox_loss_coefficient
UpperCamelCase : Optional[int] = giou_loss_coefficient
UpperCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def _lowercase ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _lowercase ( self ):
"""simple docstring"""
return self.d_model
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
UpperCamelCase : List[Any] = self.backbone_config.to_dict()
UpperCamelCase : List[Any] = self.__class__.model_type
return output
class UpperCAmelCase_ ( OnnxConfig):
'''simple docstring'''
__UpperCamelCase : Dict = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-5
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
| 643
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : Dict = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : int = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : str = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[int] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701
|
import requests
from bs4 import BeautifulSoup
def stock_price ( symbol : str = "AAPL" ):
    """simple docstring"""
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text
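# Example (requires network access; the CSS class above is tied to Yahoo's
# current page markup and may break without notice):
#     print(stock_price("AAPL"))  # e.g. "228.02"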
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 643
| 0
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path ( test_file : str ):
    """simple docstring"""
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
            F"""{test_file} instead.""" )
    test_fn = components[-1]
    if not test_fn.endswith('''py''' ):
        raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
    if not test_fn.startswith('''test_modeling_''' ):
        raise ValueError(
            F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
    components = components[:-1] + [test_fn.replace('''.py''' , '''''' )]
    test_module_path = '''.'''.join(components )
    return test_module_path
def get_test_module ( test_file : str ):
    """simple docstring"""
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module
def get_tester_classes ( test_file : str ):
    """simple docstring"""
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith('''ModelTester''' ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_classes ( test_file : str ):
    """simple docstring"""
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        test_class = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class , '''all_model_classes''' , [] )
        if len(model_classes ) > 0:
            test_classes.append(test_class )
    # sort with class names
    return sorted(test_classes , key=lambda x : x.__name__ )
def get_model_classes ( test_file : str ):
    """simple docstring"""
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x : x.__name__ )
def get_model_tester_from_test_class ( test_class ):
    """simple docstring"""
    test = test_class()
    if hasattr(test , '''setUp''' ):
        test.setUp()
    model_tester = None
    if hasattr(test , '''model_tester''' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model ( test_file : str , model_class ):
    """simple docstring"""
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x : x.__name__ )
def get_tester_classes_for_model ( test_file : str , model_class ):
    """simple docstring"""
    test_classes = get_test_classes_for_model(test_file , model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_to_tester_mapping ( test_file : str ):
    """simple docstring"""
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping ( test_file : str ):
    """simple docstring"""
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping ( test_file : str ):
    """simple docstring"""
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json ( o ):
    """simple docstring"""
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
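# Usage sketch (assumes a `transformers` checkout so the test module under
# `tests/models/...` is importable):
#     test_file = "tests/models/bert/test_modeling_bert.py"
#     print(to_json(get_model_to_test_mapping(test_file)))
#     print(to_json(get_model_to_tester_mapping(test_file)))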
| 702
|
def a ( SCREAMING_SNAKE_CASE_ : int ):
    """simple docstring"""
    if SCREAMING_SNAKE_CASE_ > 0:
        raise ValueError('''input must be a negative integer''' )
    binary_number_length = len(bin(SCREAMING_SNAKE_CASE_ )[3:] )
    twos_complement_number = bin(abs(SCREAMING_SNAKE_CASE_ ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if SCREAMING_SNAKE_CASE_ < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( PretrainedConfig):
'''simple docstring'''
__UpperCamelCase : int = "gptsan-japanese"
__UpperCamelCase : List[Any] = [
"past_key_values",
]
__UpperCamelCase : Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , __SCREAMING_SNAKE_CASE=36_000 , __SCREAMING_SNAKE_CASE=1_280 , __SCREAMING_SNAKE_CASE=1_024 , __SCREAMING_SNAKE_CASE=8_192 , __SCREAMING_SNAKE_CASE=4_096 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=1e-5 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE="float32" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.002 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=35_998 , __SCREAMING_SNAKE_CASE=35_995 , __SCREAMING_SNAKE_CASE=35_999 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Dict = vocab_size
UpperCamelCase : Optional[Any] = max_position_embeddings
UpperCamelCase : Optional[int] = d_model
UpperCamelCase : Optional[Any] = d_ff
UpperCamelCase : int = d_ext
UpperCamelCase : Optional[int] = d_spout
UpperCamelCase : str = num_switch_layers
UpperCamelCase : int = num_ext_layers
UpperCamelCase : List[Any] = num_switch_layers + num_ext_layers
UpperCamelCase : Union[str, Any] = num_heads
UpperCamelCase : Any = num_experts
UpperCamelCase : Dict = expert_capacity
UpperCamelCase : List[Any] = dropout_rate
UpperCamelCase : Tuple = layer_norm_epsilon
UpperCamelCase : Union[str, Any] = router_bias
UpperCamelCase : str = router_jitter_noise
UpperCamelCase : List[str] = router_dtype
UpperCamelCase : Any = router_ignore_padding_tokens
UpperCamelCase : List[Any] = output_hidden_states
UpperCamelCase : Union[str, Any] = output_attentions
UpperCamelCase : Optional[int] = initializer_factor
UpperCamelCase : Dict = output_router_logits
UpperCamelCase : Union[str, Any] = use_cache
        super().__init__(
            separator_token_id=__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
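# Minimal instantiation sketch (hypothetical; every value falls back to the
# defaults declared above, and the class name follows this dump's spelling):
#     config = UpperCAmelCase_()
#     assert config.num_layers == config.num_switch_layers + config.num_ext_layers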
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase_ ( PretrainedConfig):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "yolos"
def __init__( self , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=[512, 864] , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : int = num_attention_heads
UpperCamelCase : Dict = intermediate_size
UpperCamelCase : Dict = hidden_act
UpperCamelCase : int = hidden_dropout_prob
UpperCamelCase : Any = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : int = image_size
UpperCamelCase : Any = patch_size
UpperCamelCase : str = num_channels
UpperCamelCase : str = qkv_bias
UpperCamelCase : Tuple = num_detection_tokens
UpperCamelCase : List[Any] = use_mid_position_embeddings
UpperCamelCase : Dict = auxiliary_loss
# Hungarian matcher
UpperCamelCase : Optional[Any] = class_cost
UpperCamelCase : Union[str, Any] = bbox_cost
UpperCamelCase : Any = giou_cost
# Loss coefficients
UpperCamelCase : List[Any] = bbox_loss_coefficient
UpperCamelCase : Union[str, Any] = giou_loss_coefficient
UpperCamelCase : Dict = eos_coefficient
class UpperCAmelCase_ ( OnnxConfig):
'''simple docstring'''
__UpperCamelCase : List[str] = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-4
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
| 643
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : Tuple = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[str] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[str] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704
|
def a ( a : int , b : int ):
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
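# Worked example: a(25, 32) == "0b111001", i.e. 0b011001 OR 0b100000 with both
# operands zero-padded to the longer width (6 bits) before the digit-wise OR.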
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
| 0
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCamelCase : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCamelCase : Union[str, Any] = """xvjiarui/stable-diffusion-2-inpainting"""
UpperCamelCase : List[Any] = FlaxStableDiffusionInpaintPipeline.from_pretrained(_a , safety_checker=_a )
UpperCamelCase : int = """Face of a yellow cat, high resolution, sitting on a park bench"""
UpperCamelCase : Tuple = jax.random.PRNGKey(0 )
UpperCamelCase : Optional[int] = 50
UpperCamelCase : Union[str, Any] = jax.device_count()
UpperCamelCase : Tuple = num_samples * [prompt]
UpperCamelCase : int = num_samples * [init_image]
UpperCamelCase : Dict = num_samples * [mask_image]
UpperCamelCase : Tuple = pipeline.prepare_inputs(_a , _a , _a )
# shard inputs and rng
UpperCamelCase : str = replicate(_a )
UpperCamelCase : str = jax.random.split(_a , jax.device_count() )
UpperCamelCase : List[str] = shard(_a )
UpperCamelCase : int = shard(_a )
UpperCamelCase : Tuple = shard(_a )
UpperCamelCase : str = pipeline(
_a , _a , _a , _a , _a , _a , jit=_a )
UpperCamelCase : List[Any] = output.images.reshape(_a , 512 , 512 , 3 )
UpperCamelCase : List[Any] = images[0, 253:256, 253:256, -1]
UpperCamelCase : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase : Optional[Any] = jnp.array(
[0.3_611_307, 0.37_649_736, 0.3_757_408, 0.38_213_953, 0.39_295_167, 0.3_841_631, 0.41_554_978, 0.4_137_475, 0.4_217_084] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 705
|
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel ( k_size : int , sigma : float ):
    """simple docstring"""
    center = k_size // 2
    x , y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
    return g
def gaussian_filter ( image , k_size : int , sigma : float ):
    """simple docstring"""
    height , width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size) )
    row = 0
    for i, j in product(range(dst_height ) , range(dst_width ) ):
        window = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size , sigma )
    filter_array = ravel(gaussian_kernel )
    # reshape and get the dst image
    dst = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uint8 )
    return dst
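# Shape sketch for the im2col trick above: a 512x512 input with k_size=3 yields
# image_array of shape (510*510, 9) and filter_array of shape (9,); the dot
# product then collapses every 3x3 window into one pixel of the (510, 510) output.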
if __name__ == "__main__":
# read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
waitKey()
| 643
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCAmelCase : int = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class UpperCAmelCase_ ( PretrainedConfig):
'''simple docstring'''
__UpperCamelCase : List[Any] = "luke"
def __init__( self , __SCREAMING_SNAKE_CASE=50_267 , __SCREAMING_SNAKE_CASE=500_000 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
        super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = vocab_size
UpperCamelCase : Any = entity_vocab_size
UpperCamelCase : Optional[Any] = hidden_size
UpperCamelCase : Union[str, Any] = entity_emb_size
UpperCamelCase : str = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : Any = hidden_act
UpperCamelCase : Union[str, Any] = intermediate_size
UpperCamelCase : str = hidden_dropout_prob
UpperCamelCase : Any = attention_probs_dropout_prob
UpperCamelCase : int = max_position_embeddings
UpperCamelCase : List[Any] = type_vocab_size
UpperCamelCase : List[str] = initializer_range
UpperCamelCase : Optional[Any] = layer_norm_eps
UpperCamelCase : Dict = use_entity_aware_attention
UpperCamelCase : List[Any] = classifier_dropout
| 706
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def a ( main_process_only : bool = True , *args , **kwargs ):
    """simple docstring"""
    if not is_tqdm_available():
        raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
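# Usage sketch (hypothetical loop; on multi-process runs only local rank 0
# renders the bar, every other rank gets disable=True):
#     for batch in a(True, dataloader):
#         ...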
| 643
| 0
|
def and_gate ( input_1 : int , input_2 : int ):
    """simple docstring"""
    return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate ( ):
"""simple docstring"""
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 707
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : int = "▁"
__UpperCAmelCase : Tuple = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class UpperCAmelCase_ ( PreTrainedTokenizer):
'''simple docstring'''
__UpperCamelCase : List[str] = ["input_ids"]
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[str] = RESOURCE_FILES_NAMES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="utf8" , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , vocab_file=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = do_lower_case
UpperCamelCase : Dict = sentencepiece_model_ckpt
UpperCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCamelCase : Optional[Any] = self.load_vocab(filepath=__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : int = {self.sp_model.id_to_piece(__SCREAMING_SNAKE_CASE ): id for id in range(self.sp_model.get_piece_size() )}
UpperCamelCase : str = {v: k for k, v in self.vocab.items()}
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
        if __SCREAMING_SNAKE_CASE is None:
            return None
        split_tokens = self.tokenize(__SCREAMING_SNAKE_CASE )
        normalized_text , char_mapping = '''''', []
        for i, ch in enumerate(__SCREAMING_SNAKE_CASE ):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch )
            else:
                ch = unicodedata.normalize('''NFKC''' , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )
        text , token_mapping , offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token ) + offset
            end = start + len(token )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            offset = end
        return token_mapping
@property
def _lowercase ( self ):
"""simple docstring"""
return len(self.vocab )
def _lowercase ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.__dict__.copy()
UpperCamelCase : str = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase : Optional[int] = {}
UpperCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for c in text) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCamelCase : List[str] = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCamelCase : Any = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCamelCase : Tuple = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCamelCase : int = self.sp_model.EncodeAsPieces(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Optional[Any] = self.sp_model.SampleEncodeAsPieces(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = []
for pi, piece in enumerate(__SCREAMING_SNAKE_CASE ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__SCREAMING_SNAKE_CASE ) and pi != 0:
new_pieces.append(__SCREAMING_SNAKE_CASE )
continue
else:
continue
UpperCamelCase : Any = 0
for i, chunk in enumerate(__SCREAMING_SNAKE_CASE ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__SCREAMING_SNAKE_CASE ) or self.is_punct(__SCREAMING_SNAKE_CASE ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase : Union[str, Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCamelCase : Any = i
if len(__SCREAMING_SNAKE_CASE ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
        UpperCamelCase : Optional[int] = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
return out_string
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : int = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
return out_string
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.vocab.get(__SCREAMING_SNAKE_CASE , self.vocab.get(self.unk_token ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.reverse_vocab.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase : Any = [self.cls_token_id]
UpperCamelCase : str = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__SCREAMING_SNAKE_CASE ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__SCREAMING_SNAKE_CASE ) + 1) + [1] * (len(__SCREAMING_SNAKE_CASE ) + 3)
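    # Illustrative note (an addition, not in the original file): for a first
    # sequence of length 3 and a second of length 2, the method above returns
    # [0] * 4 + [1] * 5, that is, four zeros covering "[CLS] A" and five ones
    # covering "[SEP] [SEP] B [SEP]", matching the 9 tokens produced by
    # build_inputs_with_special_tokens above ([CLS] A [SEP] [SEP] B [SEP]).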
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__SCREAMING_SNAKE_CASE ) == 1:
UpperCamelCase : Optional[int] = unicodedata.category(__SCREAMING_SNAKE_CASE )
if cat == "Zs":
return True
return False
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = {}
with io.open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = line.rstrip('''\n''' )
UpperCamelCase : List[Any] = int(__SCREAMING_SNAKE_CASE )
return token_to_idx
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = 0
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Dict = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
UpperCamelCase : Union[str, Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
UpperCamelCase : List[Any] = token_index
writer.write(token + '''\n''' )
index += 1
UpperCamelCase : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , '''sentencepiece.bpe.model''' )
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (vocab_file,)
| 643
| 0
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class UpperCAmelCase_ ( _UpperCAmelCase):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = 1.0 if scale is None else scale
UpperCamelCase : List[Any] = 0.0 if loc is None else loc
super().__init__(lowercase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowercase_ )] )
@property
def _lowercase ( self ):
"""simple docstring"""
return self.base_dist.mean * self.scale + self.loc
@property
def _lowercase ( self ):
"""simple docstring"""
return self.base_dist.variance * self.scale**2
@property
def _lowercase ( self ):
"""simple docstring"""
return self.variance.sqrt()
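# A minimal numeric sketch (an addition, not in the original file): wrapping a
# standard Normal base distribution with loc=3.0 and scale=2.0 in the affine
# transform above gives mean 0 * 2 + 3 = 3.0, variance 1 * 2**2 = 4.0 and
# stddev 2.0, matching the three properties defined on the class.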
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(**lowercase_ )
UpperCamelCase : List[Any] = args_dim
UpperCamelCase : Tuple = nn.ModuleList([nn.Linear(lowercase_ , lowercase_ ) for dim in args_dim.values()] )
UpperCamelCase : int = domain_map
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = [proj(lowercase_ ) for proj in self.proj]
return self.domain_map(*lowercase_ )
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : Dict = function
def _lowercase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.function(lowercase_ , *lowercase_ )
class UpperCAmelCase_ :
'''simple docstring'''
__UpperCamelCase : Tuple = 42
__UpperCamelCase : Union[str, Any] = 42
__UpperCamelCase : List[str] = 42
def __init__( self , __SCREAMING_SNAKE_CASE = 1 ):
"""simple docstring"""
UpperCamelCase : str = dim
UpperCamelCase : int = {k: dim * self.args_dim[k] for k in self.args_dim}
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.dim == 1:
return self.distribution_class(*lowercase_ )
else:
return Independent(self.distribution_class(*lowercase_ ) , 1 )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
UpperCamelCase : str = self._base_distribution(lowercase_ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowercase_ , loc=lowercase_ , scale=lowercase_ , event_dim=self.event_dim )
@property
def _lowercase ( self ):
"""simple docstring"""
return () if self.dim == 1 else (self.dim,)
@property
def _lowercase ( self ):
"""simple docstring"""
return len(self.event_shape )
@property
def _lowercase ( self ):
"""simple docstring"""
return 0.0
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return ParameterProjection(
in_features=lowercase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _lowercase ( self , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
raise NotImplementedError()
@staticmethod
def _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (x + torch.sqrt(torch.square(lowercase_ ) + 4.0 )) / 2.0
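# A hand-checked sketch of the squareplus mapping above (added for
# illustration, not part of the original file):
#
#   x = torch.tensor([-3.0, 0.0, 3.0])
#   (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
#   # -> approximately tensor([0.3028, 1.0000, 3.3028])
#
# The result is strictly positive for any real input and approaches x for
# large positive x, which is why the subclasses below use it to map raw
# network outputs to positive parameters such as scale and total_count.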
class UpperCAmelCase_ ( _UpperCAmelCase):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = {"df": 1, "loc": 1, "scale": 1}
__UpperCamelCase : Union[str, Any] = StudentT
@classmethod
def _lowercase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[str] = cls.squareplus(lowercase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCamelCase : Optional[Any] = 2.0 + cls.squareplus(lowercase_ )
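        # Note (added): squareplus(...) is strictly positive, so df > 2 and the
        # Student-T variance scale**2 * df / (df - 2) stays finite.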
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class UpperCAmelCase_ ( _UpperCAmelCase):
'''simple docstring'''
__UpperCamelCase : Tuple = {"loc": 1, "scale": 1}
__UpperCamelCase : Tuple = Normal
@classmethod
def _lowercase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = cls.squareplus(lowercase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class UpperCAmelCase_ ( _UpperCAmelCase):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = {"total_count": 1, "logits": 1}
__UpperCamelCase : Tuple = NegativeBinomial
@classmethod
def _lowercase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = cls.squareplus(lowercase_ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowercase_ , logits=lowercase_ )
else:
return Independent(self.distribution_class(total_count=lowercase_ , logits=lowercase_ ) , 1 )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = distr_args
if scale is not None:
            # Adding log(scale) to the logits multiplies the mean
            # total_count * exp(logits) by `scale` (the scaling property of the
            # underlying Gamma-Poisson mixture), so counts keep integer support.
            logits += scale.log()
return self._base_distribution((total_count, logits) )
| 708
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__UpperCAmelCase : List[Any] = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__UpperCAmelCase : List[str] = "UperNetConfig"
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = 1 , ):
"""simple docstring"""
super().__init__()
        UpperCamelCase : str = nn.Conv2d(
            in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE , )
        UpperCamelCase : int = nn.BatchNorm2d(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = nn.ReLU()
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = self.conv(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = self.batch_norm(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = self.activation(__SCREAMING_SNAKE_CASE )
return output
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : List[Any] = [
            nn.AdaptiveAvgPool2d(__SCREAMING_SNAKE_CASE ),
UperNetConvModule(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = input
for layer in self.layers:
UpperCamelCase : int = layer(__SCREAMING_SNAKE_CASE )
return hidden_state
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : List[Any] = pool_scales
UpperCamelCase : Dict = align_corners
UpperCamelCase : Optional[int] = in_channels
UpperCamelCase : Union[str, Any] = channels
UpperCamelCase : List[str] = []
for i, pool_scale in enumerate(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = UperNetPyramidPoolingBlock(pool_scale=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , channels=__SCREAMING_SNAKE_CASE )
self.blocks.append(__SCREAMING_SNAKE_CASE )
self.add_module(str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = []
for ppm in self.blocks:
UpperCamelCase : List[str] = ppm(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = nn.functional.interpolate(
__SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(__SCREAMING_SNAKE_CASE )
return ppm_outs
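# Shape sketch (an added note, assuming an input of shape (N, C, H, W)): with
# pool_scales (1, 2, 3, 6), each block adaptively pools to (N, C, s, s) and
# projects to `channels` with a 1x1 convolution; the forward above then
# resizes every result back to (N, channels, H, W) so the pyramid levels can
# later be concatenated with the original feature map.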
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : int = config
UpperCamelCase : List[str] = config.pool_scales # e.g. (1, 2, 3, 6)
UpperCamelCase : Optional[int] = in_channels
UpperCamelCase : str = config.hidden_size
UpperCamelCase : str = False
        UpperCamelCase : List[str] = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
UpperCamelCase : Optional[int] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
UpperCamelCase : str = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
UpperCamelCase : Union[str, Any] = nn.ModuleList()
UpperCamelCase : Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
UpperCamelCase : List[Any] = UperNetConvModule(__SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 )
UpperCamelCase : int = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__SCREAMING_SNAKE_CASE )
self.fpn_convs.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def _lowercase ( self ):
"""simple docstring"""
self.apply(self._init_weights )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
        if isinstance(__SCREAMING_SNAKE_CASE , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = inputs[-1]
UpperCamelCase : int = [x]
psp_outs.extend(self.psp_modules(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Any = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 )
UpperCamelCase : Union[str, Any] = self.bottleneck(__SCREAMING_SNAKE_CASE )
return output
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__SCREAMING_SNAKE_CASE ) )
# build top-down path
UpperCamelCase : int = len(__SCREAMING_SNAKE_CASE )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCamelCase : Optional[int] = laterals[i - 1].shape[2:]
UpperCamelCase : Optional[Any] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__SCREAMING_SNAKE_CASE , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
UpperCamelCase : str = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
UpperCamelCase : int = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
UpperCamelCase : str = torch.cat(__SCREAMING_SNAKE_CASE , dim=1 )
UpperCamelCase : Tuple = self.fpn_bottleneck(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = self.classifier(__SCREAMING_SNAKE_CASE )
return output
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 3 , __SCREAMING_SNAKE_CASE = 1 ):
"""simple docstring"""
super().__init__()
UpperCamelCase : Dict = config
UpperCamelCase : Optional[Any] = config.auxiliary_in_channels
UpperCamelCase : Union[str, Any] = config.auxiliary_channels
UpperCamelCase : Union[str, Any] = config.auxiliary_num_convs
UpperCamelCase : Optional[Any] = config.auxiliary_concat_input
UpperCamelCase : List[str] = in_index
UpperCamelCase : Any = (kernel_size // 2) * dilation
UpperCamelCase : Optional[Any] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , dilation=__SCREAMING_SNAKE_CASE ) )
if self.num_convs == 0:
UpperCamelCase : str = nn.Identity()
else:
UpperCamelCase : Dict = nn.Sequential(*__SCREAMING_SNAKE_CASE )
if self.concat_input:
UpperCamelCase : Union[str, Any] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__SCREAMING_SNAKE_CASE , padding=kernel_size // 2 )
        UpperCamelCase : Optional[Any] = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
def _lowercase ( self ):
"""simple docstring"""
self.apply(self._init_weights )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
        if isinstance(__SCREAMING_SNAKE_CASE , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = encoder_hidden_states[self.in_index]
UpperCamelCase : str = self.convs(__SCREAMING_SNAKE_CASE )
if self.concat_input:
UpperCamelCase : int = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
UpperCamelCase : Union[str, Any] = self.classifier(__SCREAMING_SNAKE_CASE )
return output
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = UperNetConfig
__UpperCamelCase : Optional[int] = "pixel_values"
__UpperCamelCase : Dict = True
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _lowercase ( self ):
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : str = value
__UpperCAmelCase : List[Any] = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase : Union[str, Any] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.", _a, )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
UpperCamelCase : int = UperNetHead(__SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels )
UpperCamelCase : int = UperNetFCNHead(__SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=__SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC )
def _lowercase ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
UpperCamelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions
UpperCamelCase : Tuple = self.backbone.forward_with_filtered_kwargs(
__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = outputs.feature_maps
UpperCamelCase : Union[str, Any] = self.decode_head(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = nn.functional.interpolate(__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = None
if self.auxiliary_head is not None:
UpperCamelCase : int = self.auxiliary_head(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = nn.functional.interpolate(
__SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
UpperCamelCase : Optional[int] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
UpperCamelCase : Tuple = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = loss_fct(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
UpperCamelCase : Optional[Any] = (logits,) + outputs[1:]
else:
UpperCamelCase : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 643
| 0
|
from __future__ import annotations
from typing import TypedDict
class UpperCAmelCase_ ( a__):
'''simple docstring'''
__UpperCamelCase : str
__UpperCamelCase : int
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''The parameter s type must be str.''' )
return [s[i:] + s[:i] for i in range(len(_lowercase ) )]
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''The parameter s type must be str.''' )
if not s:
raise ValueError('''The parameter s must not be empty.''' )
UpperCamelCase : str = all_rotations(_lowercase )
    rotations.sort()  # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
UpperCamelCase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(_lowercase ),
}
return response
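# Worked example (added for illustration): bwt_transform("banana") builds the
# six rotations, sorts them to ["abanan", "anaban", "ananab", "banana",
# "nabana", "nanaba"], and returns {"bwt_string": "nnbaaa",
# "idx_original_string": 3}, since "banana" is the fourth sorted rotation.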
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''The parameter bwt_string type must be str.''' )
if not bwt_string:
raise ValueError('''The parameter bwt_string must not be empty.''' )
try:
UpperCamelCase : Tuple = int(_lowercase )
except ValueError:
        raise TypeError(
            '''The parameter idx_original_string type must be int or'''
            ''' castable to int.''' )
if idx_original_string < 0:
raise ValueError('''The parameter idx_original_string must not be lower than 0.''' )
if idx_original_string >= len(_lowercase ):
raise ValueError(
'''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''' )
UpperCamelCase : str = [""] * len(_lowercase )
for _ in range(len(_lowercase ) ):
for i in range(len(_lowercase ) ):
UpperCamelCase : int = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
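# Continuing the example above (added): reverse_bwt("nnbaaa", 3) rebuilds the
# sorted rotations one prepended column at a time; after len(bwt_string)
# rounds of prepend-and-sort, row 3 reads "banana", recovering the original
# string.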
if __name__ == "__main__":
__UpperCAmelCase : Optional[int] = '''Provide a string that I will generate its BWT transform: '''
__UpperCAmelCase : Optional[int] = input(entry_msg).strip()
__UpperCAmelCase : List[Any] = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
__UpperCAmelCase : Any = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
)
| 709
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__UpperCAmelCase : Optional[int] = 500000
__UpperCAmelCase , __UpperCAmelCase : Any = os.path.split(__file__)
__UpperCAmelCase : int = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase : Tuple = dataset.map(**SCREAMING_SNAKE_CASE_ )
@get_duration
def a ( SCREAMING_SNAKE_CASE_ : datasets.Dataset , **SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
UpperCamelCase : int = dataset.filter(**SCREAMING_SNAKE_CASE_ )
def a ( ):
"""simple docstring"""
UpperCamelCase : Optional[int] = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase : Dict = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
UpperCamelCase : List[str] = generate_example_dataset(
os.path.join(SCREAMING_SNAKE_CASE_ , '''dataset.arrow''' ) , SCREAMING_SNAKE_CASE_ , num_examples=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=SCREAMING_SNAKE_CASE_ )
    def tokenize(examples ):
return tokenizer(examples['''text'''] )
UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''numpy''' ):
UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''pandas''' ):
UpperCamelCase : int = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
UpperCamelCase : Dict = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
UpperCamelCase : Tuple = map(SCREAMING_SNAKE_CASE_ , function=lambda SCREAMING_SNAKE_CASE_ : None , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = map(SCREAMING_SNAKE_CASE_ , function=SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = filter(SCREAMING_SNAKE_CASE_ )
    # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 643
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class UpperCAmelCase_ ( UpperCAmelCase__, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Tuple = BarthezTokenizer
__UpperCamelCase : List[Any] = BarthezTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[str] = True
def _lowercase ( self ):
"""simple docstring"""
super().setUp()
UpperCamelCase : int = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowerCAmelCase )
UpperCamelCase : List[Any] = tokenizer
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = '''<pad>'''
UpperCamelCase : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__lowerCAmelCase ) , 101_122 )
def _lowercase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 101_122 )
@require_torch
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCamelCase : List[str] = [0, 57, 3_018, 70_307, 91, 2]
UpperCamelCase : str = self.tokenizer(
__lowerCAmelCase , max_length=len(__lowerCAmelCase ) , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase : str = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def _lowercase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCamelCase : Optional[Any] = self.get_tokenizer()
UpperCamelCase : Dict = self.get_rust_tokenizer()
UpperCamelCase : List[str] = '''I was born in 92000, and this is falsé.'''
UpperCamelCase : str = tokenizer.tokenize(__lowerCAmelCase )
UpperCamelCase : Optional[Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
UpperCamelCase : Any = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
UpperCamelCase : str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
UpperCamelCase : str = self.get_rust_tokenizer()
UpperCamelCase : Optional[Any] = tokenizer.encode(__lowerCAmelCase )
UpperCamelCase : Any = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = {'''input_ids''': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
UpperCamelCase : Union[str, Any] = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__lowerCAmelCase , )
| 710
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 643
| 0
|
from __future__ import annotations
def a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = len(_snake_case )
    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(_snake_case ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, since a
        # repeated value would mean a vertical collision. Then we apply the two
        # diagonal formulas:
        #
        # 45º: y - x = b or row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # We then verify that the results of these two formulas do not already appear
        # in diagonal_right_collisions and diagonal_left_collisions respectively
        # (a small worked example follows this function).
        #
        # If either value is already present there is a collision, so we continue to
        # the next column in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If there is no collision we call depth_first_search again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _snake_case , _snake_case , )
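# Small worked example of the diagonal formulas (added; referenced above):
# queens at (row=0, col=1) and (row=1, col=2) share row - col == -1, so they
# collide on a 45º diagonal; queens at (row=0, col=1) and (row=1, col=0)
# share row + col == 1, so they collide on a 135º diagonal.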
def a ( SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
UpperCamelCase : List[str] = []
depth_first_search([] , [] , [] , _snake_case , _snake_case )
# Print all the boards
for board in boards:
for column in board:
print(_snake_case )
print('''''' )
print(len(_snake_case ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 711
|
import torch
from transformers import AutoModel
class UpperCAmelCase_ ( torch.nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self ).__init__()
UpperCamelCase : List[str] = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
UpperCamelCase : List[Any] = torch.nn.Softmax(dim=1 )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = W_supports['''sizes'''].tolist()
UpperCamelCase : Optional[int] = W_supports['''start_token_id'''].item()
UpperCamelCase : Any = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCamelCase : Union[str, Any] = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = None
UpperCamelCase : Any = None
UpperCamelCase : Optional[Any] = W_supports['''input_ids'''] == start_token_id
UpperCamelCase : Any = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
UpperCamelCase : Optional[int] = 0
else:
UpperCamelCase : Tuple = support_sizes[i - 1]
UpperCamelCase : Tuple = S[s : s + size][start_token_masks[s : s + size]]
UpperCamelCase : List[str] = S[s : s + size][end_token_masks[s : s + size]]
UpperCamelCase : Dict = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCamelCase : Tuple = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
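            # Note (added, describing intent): the two matmuls score each query token
            # embedding against the support start/end token embeddings for support
            # set i; summing over the support axis and applying softmax over the
            # sequence axis yields per-position start/end probabilities.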
if p_starts is not None:
UpperCamelCase : List[str] = torch.vstack((p_starts, p_start) )
UpperCamelCase : Union[str, Any] = torch.vstack((p_ends, p_end) )
else:
UpperCamelCase : str = p_start
UpperCamelCase : Optional[int] = p_end
return p_starts, p_ends
| 643
| 0
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class UpperCAmelCase_ ( UpperCamelCase_):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
UpperCamelCase : Dict = 1.0 if scale is None else scale
UpperCamelCase : Optional[int] = 0.0 if loc is None else loc
super().__init__(UpperCamelCase__ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=UpperCamelCase__ )] )
@property
def _lowercase ( self ):
"""simple docstring"""
return self.base_dist.mean * self.scale + self.loc
@property
def _lowercase ( self ):
"""simple docstring"""
return self.base_dist.variance * self.scale**2
@property
def _lowercase ( self ):
"""simple docstring"""
return self.variance.sqrt()
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
UpperCamelCase : Union[str, Any] = args_dim
UpperCamelCase : List[Any] = nn.ModuleList([nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) for dim in args_dim.values()] )
UpperCamelCase : Dict = domain_map
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = [proj(UpperCamelCase__ ) for proj in self.proj]
return self.domain_map(*UpperCamelCase__ )
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : Union[str, Any] = function
def _lowercase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.function(UpperCamelCase__ , *UpperCamelCase__ )
class UpperCAmelCase_ :
'''simple docstring'''
__UpperCamelCase : Dict = 42
__UpperCamelCase : int = 42
__UpperCamelCase : Any = 42
def __init__( self , __SCREAMING_SNAKE_CASE = 1 ):
"""simple docstring"""
UpperCamelCase : Tuple = dim
UpperCamelCase : List[str] = {k: dim * self.args_dim[k] for k in self.args_dim}
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.dim == 1:
return self.distribution_class(*UpperCamelCase__ )
else:
return Independent(self.distribution_class(*UpperCamelCase__ ) , 1 )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self._base_distribution(UpperCamelCase__ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(UpperCamelCase__ , loc=UpperCamelCase__ , scale=UpperCamelCase__ , event_dim=self.event_dim )
@property
def _lowercase ( self ):
"""simple docstring"""
return () if self.dim == 1 else (self.dim,)
@property
def _lowercase ( self ):
"""simple docstring"""
return len(self.event_shape )
@property
def _lowercase ( self ):
"""simple docstring"""
return 0.0
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return ParameterProjection(
in_features=UpperCamelCase__ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _lowercase ( self , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
raise NotImplementedError()
@staticmethod
def _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (x + torch.sqrt(torch.square(UpperCamelCase__ ) + 4.0 )) / 2.0
class UpperCAmelCase_ ( UpperCamelCase_):
'''simple docstring'''
__UpperCamelCase : Any = {"df": 1, "loc": 1, "scale": 1}
__UpperCamelCase : int = StudentT
@classmethod
def _lowercase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = cls.squareplus(UpperCamelCase__ ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCamelCase : Any = 2.0 + cls.squareplus(UpperCamelCase__ )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class UpperCAmelCase_ ( UpperCamelCase_):
'''simple docstring'''
__UpperCamelCase : Dict = {"loc": 1, "scale": 1}
__UpperCamelCase : int = Normal
@classmethod
def _lowercase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = cls.squareplus(UpperCamelCase__ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class UpperCAmelCase_ ( UpperCamelCase_):
'''simple docstring'''
__UpperCamelCase : Tuple = {"total_count": 1, "logits": 1}
__UpperCamelCase : Optional[Any] = NegativeBinomial
@classmethod
def _lowercase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[str] = cls.squareplus(UpperCamelCase__ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = distr_args
if self.dim == 1:
return self.distribution_class(total_count=UpperCamelCase__ , logits=UpperCamelCase__ )
else:
return Independent(self.distribution_class(total_count=UpperCamelCase__ , logits=UpperCamelCase__ ) , 1 )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : str = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 712
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : str = DebertaTokenizer
__UpperCamelCase : Optional[int] = True
__UpperCamelCase : Optional[int] = DebertaTokenizerFast
def _lowercase ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase : Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
UpperCamelCase : Tuple = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase : Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCamelCase : List[Any] = {'''unk_token''': '''[UNK]'''}
UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = '''lower newer'''
UpperCamelCase : Union[str, Any] = '''lower newer'''
return input_text, output_text
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.get_tokenizer()
UpperCamelCase : int = '''lower newer'''
UpperCamelCase : Union[str, Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCamelCase : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = tokens + [tokenizer.unk_token]
UpperCamelCase : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.get_tokenizer()
UpperCamelCase : Optional[Any] = tokenizer('''Hello''' , '''World''' )
UpperCamelCase : List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCamelCase : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
UpperCamelCase : Optional[int] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCamelCase : str = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
UpperCamelCase : Union[str, Any] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = [tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) for seq in encoding['''input_ids''']]
# fmt: off
UpperCamelCase : int = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
UpperCamelCase : List[str] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __SCREAMING_SNAKE_CASE )
for expected, decoded in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 643
| 0
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def a ( SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
return EnvironmentCommand()
def a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
return EnvironmentCommand(args.accelerate_config_file )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
@staticmethod
def _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = parser.add_parser('''env''' )
download_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
download_parser.add_argument(
'''--accelerate-config_file''' , default=_SCREAMING_SNAKE_CASE , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Dict = accelerate_config_file
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = '''not installed'''
if is_safetensors_available():
import safetensors
UpperCamelCase : str = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
UpperCamelCase : List[str] = f"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
UpperCamelCase : List[Any] = '''not installed'''
UpperCamelCase : Optional[Any] = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
UpperCamelCase : Optional[Any] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[int] = load_config_from_file(self._accelerate_config_file ).to_dict()
UpperCamelCase : List[str] = (
'''\n'''.join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else f"""\t{accelerate_config}"""
)
UpperCamelCase : str = '''not installed'''
UpperCamelCase : Optional[int] = '''NA'''
if is_torch_available():
import torch
UpperCamelCase : int = torch.__version__
UpperCamelCase : str = torch.cuda.is_available()
UpperCamelCase : List[Any] = '''not installed'''
UpperCamelCase : Union[str, Any] = '''NA'''
if is_tf_available():
import tensorflow as tf
UpperCamelCase : Optional[Any] = tf.__version__
try:
# deprecated in v2.1
UpperCamelCase : Union[str, Any] = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
UpperCamelCase : Union[str, Any] = bool(tf.config.list_physical_devices('''GPU''' ) )
UpperCamelCase : Any = '''not installed'''
UpperCamelCase : List[str] = '''not installed'''
UpperCamelCase : Dict = '''not installed'''
UpperCamelCase : List[str] = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
UpperCamelCase : Optional[Any] = flax.__version__
UpperCamelCase : Any = jax.__version__
UpperCamelCase : Union[str, Any] = jaxlib.__version__
UpperCamelCase : Union[str, Any] = jax.lib.xla_bridge.get_backend().platform
UpperCamelCase : int = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': f"""{safetensors_version}""",
'''Accelerate version''': f"""{accelerate_version}""",
'''Accelerate config''': f"""{accelerate_config_str}""",
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Tensorflow version (GPU?)''': f"""{tf_version} ({tf_cuda_available})""",
'''Flax version (CPU?/GPU?/TPU?)''': f"""{flax_version} ({jax_backend})""",
'''Jax version''': f"""{jax_version}""",
'''JaxLib version''': f"""{jaxlib_version}""",
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_SCREAMING_SNAKE_CASE ) )
return info
@staticmethod
def _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 713
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
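            # Flags open(...) calls whose arguments mention neither an explicit encoding nor a binary/write mode.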
UpperCamelCase : str = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
UpperCamelCase : Optional[int] = input_file.read()
UpperCamelCase : Union[str, Any] = regexp.search(__SCREAMING_SNAKE_CASE )
return match
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
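            # The first three alternatives swallow print( occurrences inside comments or (doc)strings,
            # so only a real call site ends up in capture group 1.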
UpperCamelCase : Optional[int] = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
UpperCamelCase : Tuple = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCamelCase : Dict = regexp.finditer(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = Path('''./datasets''' )
UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__SCREAMING_SNAKE_CASE ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = Path('''./datasets''' )
UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_print_statements(str(__SCREAMING_SNAKE_CASE ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 643
| 0
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class UpperCAmelCase_ :
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase : int = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase : str = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase : List[str] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCamelCase : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase : Any = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCamelCase : List[Any] = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase : str = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCamelCase : Tuple = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = self.get_dummy_components()
UpperCamelCase : str = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = inputs["""prompt"""]
UpperCamelCase : Tuple = inputs["""generator"""]
UpperCamelCase : Tuple = inputs["""num_inference_steps"""]
UpperCamelCase : Tuple = inputs["""output_type"""]
if "image" in inputs:
UpperCamelCase : List[str] = inputs["""image"""]
else:
UpperCamelCase : Union[str, Any] = None
if "mask_image" in inputs:
UpperCamelCase : Optional[Any] = inputs["""mask_image"""]
else:
UpperCamelCase : Tuple = None
if "original_image" in inputs:
UpperCamelCase : List[str] = inputs["""original_image"""]
else:
UpperCamelCase : Optional[int] = None
UpperCamelCase : str = pipe.encode_prompt(__SCREAMING_SNAKE_CASE )
# inputs with prompt converted to embeddings
UpperCamelCase : Union[str, Any] = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
UpperCamelCase : int = image
if mask_image is not None:
UpperCamelCase : Optional[Any] = mask_image
if original_image is not None:
UpperCamelCase : str = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = pipe(**__SCREAMING_SNAKE_CASE )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = self.pipeline_class.from_pretrained(__SCREAMING_SNAKE_CASE )
pipe_loaded.to(__SCREAMING_SNAKE_CASE )
pipe_loaded.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCamelCase : Optional[int] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = inputs["""generator"""]
UpperCamelCase : List[str] = inputs["""num_inference_steps"""]
UpperCamelCase : List[str] = inputs["""output_type"""]
# inputs with prompt converted to embeddings
UpperCamelCase : Dict = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
UpperCamelCase : Union[str, Any] = image
if mask_image is not None:
UpperCamelCase : List[str] = mask_image
if original_image is not None:
UpperCamelCase : int = original_image
UpperCamelCase : List[str] = pipe_loaded(**__SCREAMING_SNAKE_CASE )[0]
UpperCamelCase : Optional[int] = np.abs(to_np(__SCREAMING_SNAKE_CASE ) - to_np(__SCREAMING_SNAKE_CASE ) ).max()
self.assertLess(__SCREAMING_SNAKE_CASE , 1e-4 )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.get_dummy_components()
UpperCamelCase : Any = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = pipe(**__SCREAMING_SNAKE_CASE )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = self.pipeline_class.from_pretrained(__SCREAMING_SNAKE_CASE )
pipe_loaded.to(__SCREAMING_SNAKE_CASE )
pipe_loaded.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCamelCase : Tuple = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = pipe_loaded(**__SCREAMING_SNAKE_CASE )[0]
UpperCamelCase : List[Any] = np.abs(to_np(__SCREAMING_SNAKE_CASE ) - to_np(__SCREAMING_SNAKE_CASE ) ).max()
self.assertLess(__SCREAMING_SNAKE_CASE , 1e-4 )
| 714
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCAmelCase_ :
'''simple docstring'''
__UpperCamelCase : Any = XGLMConfig
__UpperCamelCase : Dict = {}
__UpperCamelCase : List[str] = "gelu"
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=14 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=0.02 , ):
"""simple docstring"""
UpperCamelCase : Any = parent
UpperCamelCase : Optional[int] = batch_size
UpperCamelCase : str = seq_length
UpperCamelCase : List[str] = is_training
UpperCamelCase : Tuple = use_input_mask
UpperCamelCase : Union[str, Any] = use_labels
UpperCamelCase : int = vocab_size
UpperCamelCase : Optional[int] = d_model
UpperCamelCase : Any = num_hidden_layers
UpperCamelCase : List[str] = num_attention_heads
UpperCamelCase : Optional[Any] = ffn_dim
UpperCamelCase : Optional[int] = activation_function
UpperCamelCase : List[str] = activation_dropout
UpperCamelCase : Any = attention_dropout
UpperCamelCase : str = max_position_embeddings
UpperCamelCase : Union[str, Any] = initializer_range
UpperCamelCase : int = None
UpperCamelCase : Dict = 0
UpperCamelCase : int = 2
UpperCamelCase : Any = 1
def _lowercase ( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCamelCase : int = None
if self.use_input_mask:
UpperCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Tuple = self.get_config()
UpperCamelCase : str = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowercase ( self ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__SCREAMING_SNAKE_CASE , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs  # (config, input_ids, input_mask, head_mask)
UpperCamelCase : List[str] = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _a, _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__UpperCamelCase : Union[str, Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__UpperCamelCase : Any = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : List[Any] = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = TFXGLMModelTester(self )
UpperCamelCase : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , n_embd=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : List[Any] = TFXGLMModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def _lowercase ( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@slow
def _lowercase ( self , __SCREAMING_SNAKE_CASE=True ):
"""simple docstring"""
UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : List[Any] = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase : str = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
UpperCamelCase : Union[str, Any] = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
UpperCamelCase : Tuple = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
UpperCamelCase : int = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
UpperCamelCase : str = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , seed=[7, 0] )
UpperCamelCase : Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : Tuple = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : Tuple = '''left'''
# use different length sentences to test batching
UpperCamelCase : Any = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
UpperCamelCase : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' , padding=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = inputs['''input_ids''']
UpperCamelCase : Optional[int] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
UpperCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
UpperCamelCase : Optional[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 )
UpperCamelCase : str = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
UpperCamelCase : List[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 )
UpperCamelCase : Any = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence] )
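# A minimal sketch of why padding_side is set to "left" above: with right padding the
# <pad> tokens would sit between the prompt and the newly generated tokens, so batched
# generation would diverge from the per-sentence calls it is compared against here.
# tokenizer.padding_side = '''left'''
# batch = tokenizer(sentences , return_tensors='''tf''' , padding=True )
# model.generate(batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , max_new_tokens=12 )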
| 643
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class UpperCAmelCase_ ( lowercase__):
'''simple docstring'''
__UpperCamelCase : Tuple = """roformer"""
def __init__( self , __SCREAMING_SNAKE_CASE=50_000 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1_536 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowercase , **__lowercase )
UpperCamelCase : Dict = vocab_size
UpperCamelCase : Dict = hidden_size if embedding_size is None else embedding_size
UpperCamelCase : List[str] = hidden_size
UpperCamelCase : int = num_hidden_layers
UpperCamelCase : str = num_attention_heads
UpperCamelCase : List[str] = hidden_act
UpperCamelCase : List[str] = intermediate_size
UpperCamelCase : Optional[Any] = hidden_dropout_prob
UpperCamelCase : Tuple = attention_probs_dropout_prob
UpperCamelCase : List[Any] = max_position_embeddings
UpperCamelCase : Optional[int] = type_vocab_size
UpperCamelCase : List[str] = initializer_range
UpperCamelCase : Any = layer_norm_eps
UpperCamelCase : Tuple = rotary_value
UpperCamelCase : Tuple = use_cache
class UpperCAmelCase_ ( lowercase__):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase : str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase : Dict = {0: '''batch''', 1: '''sequence'''}
UpperCamelCase : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
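# A minimal sketch (hypothetical usage; the two classes above are the obfuscated
# RoFormerConfig and its ONNX export config): instantiating the export config and
# reading its `inputs` property yields an OrderedDict of dynamic axes, e.g.
# {"input_ids": {0: "batch", 1: "sequence"}, "attention_mask": {...}, "token_type_ids": {...}}.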
| 715
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : int = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : str = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
__UpperCAmelCase : Union[str, Any] = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
__UpperCAmelCase : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
__UpperCamelCase : Any = MBartTokenizer
__UpperCamelCase : List[int] = []
__UpperCamelCase : List[int] = []
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = vocab_file
UpperCamelCase : List[str] = False if not self.vocab_file else True
UpperCamelCase : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
UpperCamelCase : List[Any] = {
lang_code: self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCamelCase : Dict = src_lang if src_lang is not None else '''en_XX'''
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCamelCase : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowercase ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : str = [self.sep_token_id]
UpperCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCamelCase : List[str] = src_lang
UpperCamelCase : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = tgt_lang_id
return inputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = src_lang
UpperCamelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = []
UpperCamelCase : Dict = [self.eos_token_id, self.cur_lang_code]
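        # MBART source format is "X [eos, src_lang_code]": no prefix tokens, and the language code follows eos.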
UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = []
UpperCamelCase : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCamelCase : Optional[int] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
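# A minimal usage sketch (in upstream `transformers` this class is `MBartTokenizerFast`;
# checkpoint name as published, outputs illustrative):
# tok = MBartTokenizerFast.from_pretrained('''facebook/mbart-large-en-ro''' , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
# batch = tok('''UN Chief Says There Is No Military Solution in Syria''' , return_tensors='''pt''' )
# batch['''input_ids'''][0, -2:]  # ..., </s>, en_XX: the language code is appended after eos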
| 643
| 0
|
from ..utils import DummyObject, requires_backends
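# Every class below is an import-time placeholder: when `torch` is not installed,
# `requires_backends` raises an ImportError with install instructions as soon as the
# object is constructed (or a classmethod such as `from_pretrained` is called),
# instead of failing later with an opaque NameError deep inside the library.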
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : str = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Any = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Dict = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : int = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : str = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : int = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Optional[int] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Tuple = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Dict = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
def a ( *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['''torch'''] )
def a ( *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['''torch'''] )
def a ( *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['''torch'''] )
def a ( *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['''torch'''] )
def a ( *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['''torch'''] )
def a ( *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['''torch'''] )
def a ( *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
requires_backends(_lowerCAmelCase , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : int = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Tuple = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : str = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Any = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Optional[int] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : int = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Optional[int] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : str = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Tuple = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Dict = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Dict = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : str = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : str = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Tuple = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Tuple = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Any = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Tuple = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Any = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : int = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Optional[int] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : str = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : List[Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : List[Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : int = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Dict = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Dict = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : List[Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : List[Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Dict = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : str = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Tuple = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__snake_case):
'''simple docstring'''
__UpperCamelCase : List[Any] = ['torch']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def _lowercase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
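# The classes above all follow the "dummy object" pattern: import-time placeholders,
# registered through a DummyObject-style metaclass, whose only behaviour is to call
# requires_backends and raise a helpful ImportError when torch is not installed. A
# hedged sketch of the effect (the class name below is illustrative, not from this file):
#
#     try:
#         obj = SomeTorchOnlyClass()     # in an environment without torch
#     except ImportError as err:
#         print(err)                     # explains that the `torch` backend is required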
| 716
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCAmelCase : Dict = False
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCamelCase : str = torch.manual_seed(0 )
UpperCamelCase : Union[str, Any] = pipe(
image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCamelCase : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : Dict = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 643
| 0
|
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
__UpperCAmelCase : Optional[Any] = '''pytorch_model.bin'''
__UpperCAmelCase : Optional[Any] = '''pytorch_model.bin.index.json'''
__UpperCAmelCase : List[str] = '''adapter_config.json'''
__UpperCAmelCase : str = '''adapter_model.bin'''
__UpperCAmelCase : Tuple = '''adapter_model.safetensors'''
__UpperCAmelCase : int = '''tf_model.h5'''
__UpperCAmelCase : Optional[Any] = '''tf_model.h5.index.json'''
__UpperCAmelCase : str = '''model.ckpt'''
__UpperCAmelCase : Union[str, Any] = '''flax_model.msgpack'''
__UpperCAmelCase : List[Any] = '''flax_model.msgpack.index.json'''
__UpperCAmelCase : List[str] = '''model.safetensors'''
__UpperCAmelCase : Tuple = '''model.safetensors.index.json'''
__UpperCAmelCase : List[Any] = '''config.json'''
__UpperCAmelCase : int = '''preprocessor_config.json'''
__UpperCAmelCase : Any = FEATURE_EXTRACTOR_NAME
__UpperCAmelCase : Optional[int] = '''generation_config.json'''
__UpperCAmelCase : int = '''modelcard.json'''
__UpperCAmelCase : int = '''▁'''
__UpperCAmelCase : Optional[int] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
__UpperCAmelCase : Optional[int] = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
__UpperCAmelCase : Tuple = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
__UpperCAmelCase : Any = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def a ( SCREAMING_SNAKE_CASE_ : str ):
    """simple docstring"""
    # Compare the installed library version against the requested minimum version.
    if version.parse(__version__ ) < version.parse(SCREAMING_SNAKE_CASE_ ):
        if "dev" in SCREAMING_SNAKE_CASE_:
            error_message = (
                '''This example requires a source install from HuggingFace Transformers (see '''
                '''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
            )
        else:
            error_message = f"""This example requires a minimum version of {SCREAMING_SNAKE_CASE_},"""
        error_message += f""" but the version found is {__version__}.\n"""
        raise ImportError(
            error_message
            + '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
            '''versions of HuggingFace Transformers.''' )
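# Minimal usage sketch for the version check above (the required-version strings are
# illustrative assumptions): the call is a no-op when the installed `transformers`
# satisfies the minimum and raises ImportError otherwise.
#
#     a("4.21.0")          # fine on any release >= 4.21.0
#     a("999.0.0.dev0")    # raises ImportError with the message built above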
| 717
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Any = ["input_features"]
def __init__( self , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=16_000 , __SCREAMING_SNAKE_CASE=160 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = n_fft
UpperCamelCase : Dict = hop_length
UpperCamelCase : Dict = chunk_length
UpperCamelCase : List[str] = chunk_length * sampling_rate
UpperCamelCase : Dict = self.n_samples // hop_length
UpperCamelCase : str = sampling_rate
UpperCamelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[str] = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
UpperCamelCase : int = log_spec[:, :-1]
UpperCamelCase : int = np.maximum(__SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
UpperCamelCase : Any = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
UpperCamelCase : List[Any] = np.array(__SCREAMING_SNAKE_CASE , np.intaa )
UpperCamelCase : Optional[Any] = []
for vector, length in zip(__SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
UpperCamelCase : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
UpperCamelCase : Optional[int] = padding_value
normed_input_values.append(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "max_length" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCamelCase : Tuple = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : Union[str, Any] = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
UpperCamelCase : int = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : Optional[int] = [np.asarray([raw_speech] ).T]
UpperCamelCase : Optional[int] = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
UpperCamelCase : Optional[Any] = self.pad(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
UpperCamelCase : Optional[Any] = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
UpperCamelCase : List[str] = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
UpperCamelCase : Dict = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
UpperCamelCase : Tuple = [self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[int] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
else:
UpperCamelCase : Dict = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
UpperCamelCase : Union[str, Any] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
UpperCamelCase : Dict = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCamelCase : List[str] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
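# Hedged usage sketch for the feature extractor above, assuming it mirrors the upstream
# WhisperFeatureExtractor API (real keyword names, not the obfuscated signature):
#
#     import numpy as np
#     fe = WhisperFeatureExtractor(feature_size=80, sampling_rate=16_000)
#     waveform = np.zeros(16_000, dtype=np.float32)            # 1 s of silence at 16 kHz
#     feats = fe(waveform, sampling_rate=16_000, return_tensors="np")
#     feats["input_features"].shape                            # (1, 80, 3000) log-mel frames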
| 643
| 0
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , __SCREAMING_SNAKE_CASE=[2, 2, 3, 2] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=["stage2", "stage3", "stage4"] , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , ):
"""simple docstring"""
a_ : List[Any] = parent
a_ : Dict = batch_size
a_ : List[str] = image_size
a_ : Tuple = num_channels
a_ : Any = num_stages
a_ : List[str] = hidden_sizes
a_ : Optional[Any] = depths
a_ : List[str] = is_training
a_ : Optional[int] = use_labels
a_ : List[Any] = intermediate_size
a_ : Union[str, Any] = hidden_act
a_ : Optional[int] = type_sequence_label_size
a_ : Optional[int] = initializer_range
a_ : str = out_features
a_ : Optional[int] = num_labels
a_ : Any = scope
a_ : Optional[int] = num_stages
def _lowercase ( self ):
"""simple docstring"""
a_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : Any = None
if self.use_labels:
a_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def _lowercase ( self ):
"""simple docstring"""
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=lowerCamelCase__ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=lowerCamelCase__ , loss_ignore_index=255 , num_labels=self.num_labels , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
a_ : List[Any] = UperNetForSemanticSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a_ : str = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _lowercase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : str = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
__UpperCamelCase : Dict = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : Any = False
__UpperCamelCase : Optional[int] = False
def _lowercase ( self ):
"""simple docstring"""
a_ : List[Any] = UperNetModelTester(self )
a_ : Tuple = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self ):
"""simple docstring"""
return
def _lowercase ( self ):
"""simple docstring"""
a_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Tuple = model_class(lowerCamelCase__ )
a_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Optional[int] = [*signature.parameters.keys()]
a_ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _lowercase ( self ):
"""simple docstring"""
a_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a_ : Dict = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
a_ : List[str] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
a_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a_ : List[str] = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
a_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : int = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ : Optional[Any] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def _lowercase ( self ):
"""simple docstring"""
a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Union[str, Any] = _config_zero_init(lowerCamelCase__ )
a_ : str = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
a_ : Tuple = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : str = UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def a ( ):
"""simple docstring"""
a_ : Any = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
a_ : int = Image.open(__lowerCAmelCase ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
a_ : List[Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
a_ : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase__ )
a_ : str = prepare_img()
a_ : Any = processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
with torch.no_grad():
a_ : Union[str, Any] = model(**lowerCamelCase__ )
a_ : List[Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
a_ : Any = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
def _lowercase ( self ):
"""simple docstring"""
a_ : Dict = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
a_ : Dict = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase__ )
a_ : int = prepare_img()
a_ : List[Any] = processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
with torch.no_grad():
a_ : Dict = model(**lowerCamelCase__ )
a_ : int = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
a_ : Dict = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
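# Post-processing sketch for the integration tests above: the (1, num_labels, 512, 512)
# logits become a per-pixel class map with a plain argmax over the label dimension.
#
#     seg_map = outputs.logits.argmax(dim=1)[0]   # (512, 512) tensor of class indices
#     seg_map.unique()                            # classes predicted for this image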
| 718
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : Dict = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
__UpperCAmelCase : Tuple = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
__UpperCAmelCase : Any = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : str = VOCAB_FILES_NAMES
__UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : Any = RoFormerTokenizer
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE ) != strip_accents
):
UpperCamelCase : List[Any] = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
UpperCamelCase : Optional[int] = do_lower_case
UpperCamelCase : Optional[Any] = strip_accents
UpperCamelCase : List[Any] = pre_tok_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = do_lower_case
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.__dict__.copy()
UpperCamelCase : Any = BertPreTokenizer()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = d
UpperCamelCase : List[str] = self.__dict__['''_tokenizer'''].get_vocab()
UpperCamelCase : Any = PreTokenizer.custom(JiebaPreTokenizer(__SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : List[Any] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Any = BertPreTokenizer()
return super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
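# Hedged usage sketch for the class above (RoFormerTokenizerFast in upstream
# transformers). The checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP earlier in
# this file; the custom JiebaPreTokenizer additionally needs `rjieba` at runtime:
#
#     tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     enc = tok("今天天气非常好。", return_tensors="pt")
#     enc["input_ids"].shape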
| 643
| 0
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCAmelCase_ ( __a, __a):
'''simple docstring'''
@register_to_config
def __init__( self , __SCREAMING_SNAKE_CASE = 128 , __SCREAMING_SNAKE_CASE = 256 , __SCREAMING_SNAKE_CASE = 2_000.0 , __SCREAMING_SNAKE_CASE = 768 , __SCREAMING_SNAKE_CASE = 12 , __SCREAMING_SNAKE_CASE = 12 , __SCREAMING_SNAKE_CASE = 64 , __SCREAMING_SNAKE_CASE = 2_048 , __SCREAMING_SNAKE_CASE = 0.1 , ):
"""simple docstring"""
super().__init__()
UpperCamelCase : str = nn.Sequential(
nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , )
UpperCamelCase : Optional[int] = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCamelCase : int = False
UpperCamelCase : Union[str, Any] = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
UpperCamelCase : Tuple = nn.Dropout(p=lowerCAmelCase_ )
UpperCamelCase : Tuple = nn.ModuleList()
for lyr_num in range(lowerCAmelCase_ ):
# FiLM conditional T5 decoder
UpperCamelCase : str = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
self.decoders.append(lowerCAmelCase_ )
UpperCamelCase : List[str] = TaLayerNorm(lowerCAmelCase_ )
UpperCamelCase : Optional[int] = nn.Dropout(p=lowerCAmelCase_ )
UpperCamelCase : int = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
UpperCamelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
UpperCamelCase : List[Any] = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
UpperCamelCase : List[Any] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
UpperCamelCase : Tuple = torch.broadcast_to(
torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
UpperCamelCase : List[Any] = self.position_encoding(lowerCAmelCase_ )
UpperCamelCase : int = self.continuous_inputs_projection(lowerCAmelCase_ )
inputs += position_encodings
UpperCamelCase : Dict = self.dropout(lowerCAmelCase_ )
# decoder: No padding present.
UpperCamelCase : Optional[Any] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
UpperCamelCase : Any = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
UpperCamelCase : Any = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
UpperCamelCase : List[str] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
UpperCamelCase : Dict = lyr(
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0]
UpperCamelCase : Tuple = self.decoder_norm(lowerCAmelCase_ )
UpperCamelCase : List[str] = self.post_dropout(lowerCAmelCase_ )
UpperCamelCase : Union[str, Any] = self.spec_out(lowerCAmelCase_ )
return spec_out
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1e-6 ):
"""simple docstring"""
super().__init__()
UpperCamelCase : Tuple = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.layer[0](
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
if encoder_hidden_states is not None:
UpperCamelCase : Optional[int] = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
UpperCamelCase : Tuple = self.layer[1](
lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
# Apply Film Conditional Feed Forward layer
UpperCamelCase : int = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ )
return (hidden_states,)
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : Optional[int] = TaLayerNorm(lowerCAmelCase_ )
UpperCamelCase : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
UpperCamelCase : List[str] = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
UpperCamelCase : Any = nn.Dropout(lowerCAmelCase_ )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ):
"""simple docstring"""
UpperCamelCase : Any = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
UpperCamelCase : Optional[Any] = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ )
# Self-attention block
UpperCamelCase : List[Any] = self.attention(lowerCAmelCase_ )
UpperCamelCase : int = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : Union[str, Any] = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
UpperCamelCase : str = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
UpperCamelCase : List[Any] = nn.Dropout(lowerCAmelCase_ )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.layer_norm(lowerCAmelCase_ )
UpperCamelCase : List[str] = self.attention(
lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , )
UpperCamelCase : Dict = hidden_states + self.dropout(lowerCAmelCase_ )
return layer_output
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : Tuple = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
UpperCamelCase : Union[str, Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
UpperCamelCase : str = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
UpperCamelCase : Optional[int] = nn.Dropout(lowerCAmelCase_ )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase : int = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
UpperCamelCase : Optional[int] = self.film(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCamelCase : Optional[int] = self.DenseReluDense(lowerCAmelCase_ )
UpperCamelCase : List[str] = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : List[str] = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
UpperCamelCase : Optional[int] = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
UpperCamelCase : int = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
UpperCamelCase : int = nn.Dropout(lowerCAmelCase_ )
UpperCamelCase : Optional[int] = NewGELUActivation()
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = self.act(self.wi_a(lowerCAmelCase_ ) )
UpperCamelCase : str = self.wi_a(lowerCAmelCase_ )
UpperCamelCase : Union[str, Any] = hidden_gelu * hidden_linear
UpperCamelCase : Union[str, Any] = self.dropout(lowerCAmelCase_ )
UpperCamelCase : Dict = self.wo(lowerCAmelCase_ )
return hidden_states
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1e-6 ):
"""simple docstring"""
super().__init__()
UpperCamelCase : Any = nn.Parameter(torch.ones(lowerCAmelCase_ ) )
UpperCamelCase : Optional[Any] = eps
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCAmelCase_ )
UpperCamelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
UpperCamelCase : List[str] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(lowerCAmelCase_ , 3.0 )) ))
class UpperCAmelCase_ ( nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__()
UpperCamelCase : List[str] = nn.Linear(lowerCAmelCase_ , out_features * 2 , bias=lowerCAmelCase_ )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.scale_bias(lowerCAmelCase_ )
UpperCamelCase , UpperCamelCase : Optional[Any] = torch.chunk(lowerCAmelCase_ , 2 , -1 )
UpperCamelCase : str = x * (1 + scale) + shift
return x
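# Sketch of the FiLM conditioning implemented by the last layer above: a linear map
# turns the conditioning embedding into per-channel (scale, shift) pairs, applied as
# x * (1 + scale) + shift. Shapes follow the decoder earlier in this file, where the
# conditioning vector has width d_model * 4 (concrete values are illustrative):
#
#     d_model = 768
#     film = TaFiLMLayer(d_model * 4, d_model)   # upstream name of the last class above
#     x = torch.randn(2, 10, d_model)            # (batch, seq, d_model)
#     cond = torch.randn(2, 1, d_model * 4)      # (batch, 1, 4 * d_model)
#     y = film(x, cond)                          # same shape as x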
| 719
|
from __future__ import annotations
def pigeon_sort ( SCREAMING_SNAKE_CASE_ : list[int] ):
    """
    Pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(SCREAMING_SNAKE_CASE_ ) == 0:
        return SCREAMING_SNAKE_CASE_
    _min , _max = min(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ )
    # Compute the variables
    holes_range = _max - _min + 1
    holes , holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in SCREAMING_SNAKE_CASE_:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            SCREAMING_SNAKE_CASE_[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return SCREAMING_SNAKE_CASE_
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase : Any = input("Enter numbers separated by comma:\n")
__UpperCAmelCase : int = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 643
| 0
|
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class UpperCAmelCase_ ( __lowercase):
'''simple docstring'''
__UpperCamelCase : Optional[int] = '''philschmid/bart-large-cnn-samsum'''
__UpperCamelCase : Optional[int] = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
__UpperCamelCase : Optional[Any] = '''summarizer'''
__UpperCamelCase : str = AutoTokenizer
__UpperCamelCase : Tuple = AutoModelForSeqaSeqLM
__UpperCamelCase : List[str] = ['''text''']
__UpperCamelCase : Dict = ['''text''']
    def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        return self.pre_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , truncation=True )
    def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        return self.model.generate(**__SCREAMING_SNAKE_CASE )[0]
    def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        return self.pre_processor.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=True , clean_up_tokenization_spaces=True )
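# Hedged usage sketch: PipelineTool subclasses are callable, so the tool above can be
# driven directly (class name is the upstream one; the sample text is an assumption):
#
#     tool = TextSummarizationTool()
#     summary = tool("Transformers provides thousands of pretrained models ...")
#     print(summary)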
| 720
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : List[Any] = True
except ImportError:
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def a ( SCREAMING_SNAKE_CASE_ : Namespace ):
"""simple docstring"""
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
@staticmethod
def _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[Any] = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=__SCREAMING_SNAKE_CASE , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=__SCREAMING_SNAKE_CASE , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = testing
UpperCamelCase : Any = testing_file
UpperCamelCase : Dict = path
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCamelCase : List[str] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
UpperCamelCase : Dict = (
Path(__SCREAMING_SNAKE_CASE ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCamelCase : List[Any] = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(__SCREAMING_SNAKE_CASE ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__SCREAMING_SNAKE_CASE , extra_context=__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = configuration['''lowercase_modelname''']
UpperCamelCase : int = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(f"""{directory}/configuration.json""" )
UpperCamelCase : str = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Any = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Union[str, Any] = '''Flax''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Optional[Any] = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=__SCREAMING_SNAKE_CASE )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
UpperCamelCase : Any = f.readlines()
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(__SCREAMING_SNAKE_CASE )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Create temp file
UpperCamelCase , UpperCamelCase : Optional[Any] = mkstemp()
UpperCamelCase : Tuple = False
with fdopen(__SCREAMING_SNAKE_CASE , '''w''' ) as new_file:
with open(__SCREAMING_SNAKE_CASE ) as old_file:
for line in old_file:
new_file.write(__SCREAMING_SNAKE_CASE )
if line_to_copy_below in line:
UpperCamelCase : Optional[int] = True
for line_to_copy in lines_to_copy:
new_file.write(__SCREAMING_SNAKE_CASE )
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Remove original file
remove(__SCREAMING_SNAKE_CASE )
# Move new file
move(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def skip_units(__SCREAMING_SNAKE_CASE ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(path_to_datafile):
    with open(path_to_datafile) as datafile:
        lines_to_copy = []
        skip_file = False
        skip_snippet = False
        for line in datafile:
            if "# To replace in: " in line and "##" not in line:
                file_to_replace_in = line.split('"')[1]
                skip_file = skip_units(line)
            elif "# Below: " in line and "##" not in line:
                line_to_copy_below = line.split('"')[1]
                skip_snippet = skip_units(line)
            elif "# End." in line and "##" not in line:
                if not skip_file and not skip_snippet:
                    replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                lines_to_copy = []
            elif "# Replace with" in line and "##" not in line:
                lines_to_copy = []
            elif "##" not in line:
                lines_to_copy.append(line)
    remove(path_to_datafile)
replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
os.rmdir(directory)
| 643
| 0
|
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` on [a, b] by repeated interval halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    """Example cubic with a single real root near x = 2.0945."""
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    print(bisection(f, 1, 1000))
    import doctest
    doctest.testmod()
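# Hedged sanity check (added illustration, not part of the original module): the
# same routine should recover sqrt(2) as the root of x**2 - 2 on [1, 2]; the
# helper name `g` is hypothetical.
def g(x: float) -> float:
    return x**2 - 2
# print(bisection(g, 1, 2))  # expected: approximately 1.4142135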
| 721
|
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine transform that maps pts1 onto pts2."""
    matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
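# Hedged cross-check (added sketch, not in the original script): the 2x3 matrix
# returned by cv2.getAffineTransform solves A @ X = pts2, where A stacks the
# rows [x, y, 1] from pts1; NumPy's least-squares solver reproduces it.
# A = np.hstack([pts1, np.ones((3, 1), np.float32)])
# M = np.linalg.lstsq(A, pts2, rcond=None)[0].T  # should match cv2.getAffineTransform(pts1, pts2)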
| 643
| 0
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}
def next_term(a_i, k, i, n):
    """Jump ahead several terms of the digit-sum sequence using the memo cache."""
    # ds_b -> digitsum(b), c -> the low k digits as an integer
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    """Compute sequential terms until the digits above position k change."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    """Add `addend` into the little-endian digit list, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """Return the n-th term of a(1) = 1, a(i+1) = a(i) + digitsum(a(i))."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
    print(f"{solution() = }")
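# Hedged cross-check (added, not part of the original solution): for small n the
# sequence a(i+1) = a(i) + digitsum(a(i)), a(1) = 1 can be generated directly;
# the helper name `_brute_force` is hypothetical.
def _brute_force(n: int) -> int:
    term = 1
    for _ in range(n - 1):
        term += sum(int(digit) for digit in str(term))
    return term
# _brute_force(10) == solution(10) == 62 should hold for small n.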
| 700
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig( PretrainedConfig):
'''simple docstring'''
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , encoder_layers=6 , encoder_ffn_dim=2_048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=2 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , cls_loss_coefficient=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , focal_alpha=0.25 , **kwargs , ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        """simple docstring"""
        return self.d_model
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig( OnnxConfig):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )
    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1e-5
    @property
    def default_onnx_opset( self ):
        """simple docstring"""
        return 12
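# Hedged usage sketch (added): the attribute_map above routes reads of
# `hidden_size` to `d_model`; the values shown are the defaults, not new claims.
# cfg = ConditionalDetrConfig()
# assert cfg.hidden_size == cfg.d_model == 256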
| 643
| 0
|
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    """Yield the primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
    print(f"{solution() = }")
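# Quick hedged check (added): the first six primes are 2, 3, 5, 7, 11, 13, so
# solution(6) should return 13.
# print(solution(6))  # expected: 13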
| 701
|
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current quote for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
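# Hedged note (added): the CSS class above is tied to Yahoo Finance's markup and
# breaks whenever the page layout changes; a defensive call pattern is sketched.
# try:
#     print(stock_price("AAPL"))
# except AttributeError:
#     print("selector no longer matches the page layout")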
| 643
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester( unittest.TestCase):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase):
'''simple docstring'''
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize_and_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "crop_pct" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 30} )
        self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
    def test_batch_feature( self ):
        """simple docstring"""
        pass
    def test_call_pil( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def test_call_numpy( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def test_call_pytorch( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 702
|
def twos_complement(number: int) -> str:
    """Return the two's complement of a negative integer as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
    import doctest
    doctest.testmod()
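# Worked example (added): for -5, bin(-5)[3:] == "101" gives length 3;
# abs(-5) - (1 << 3) == -3 and bin(-3)[3:] == "11", so the padded result is
# "1" + "0" + "11" -> "0b1011", the 4-bit two's complement of -5.
# print(twos_complement(-5))  # "0b1011"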
| 643
| 0
|
def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` is closed in the correct order."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0
def main():
    """simple docstring"""
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
    main()
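# Hedged doctest-style examples (added):
# is_balanced("([]{})")  -> True   (every bracket closes in order)
# is_balanced("([)]")    -> False  (")" closes "[" out of order)
# is_balanced("((")      -> False  (unclosed brackets remain on the stack)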
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig( PretrainedConfig):
'''simple docstring'''
    model_type = "yolos"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig( OnnxConfig):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1e-4
    @property
    def default_onnx_opset( self ):
        """simple docstring"""
        return 12
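# Hedged sketch (added): with the defaults above, a 512 x 864 input split into
# 16 x 16 patches yields (512 // 16) * (864 // 16) = 32 * 54 = 1728 patch tokens,
# plus one [CLS] token and 100 detection tokens in the sequence.
# cfg = YolosConfig()
# assert (cfg.image_size[0] // cfg.patch_size) * (cfg.image_size[1] // cfg.patch_size) == 1728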
| 643
| 0
|
def different_signs(num1: int, num2: int) -> bool:
    """Return True if the two integers have opposite signs."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
    import doctest
    doctest.testmod()
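# Why the trick works (added note): in two's complement the sign is the top bit,
# and XOR of two integers is negative exactly when the sign bits differ, so
# `num1 ^ num2 < 0` is a branch-free "different signs?" test.
# different_signs(3, -7)   -> True
# different_signs(-3, -7)  -> False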
| 704
|
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
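# Hedged example (added): binary_or(25, 32) pads "11001" and "100000" to equal
# length and ORs them column-wise, giving "0b111001" == bin(25 | 32).
# print(binary_or(25, 32))  # "0b111001"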
| 643
| 0
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    """simple docstring"""
    data_dir = Path("data_bin" )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(data_dir ) , bpe="sentencepiece" , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
    xmod.eval()  # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:" , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError("Dimensions of self-attention weights do not match." )
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match." )
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match." )
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match." )
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError("Lists of language adapters do not match." )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
    # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f"max_absolute_diff = {max_absolute_diff}" )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
    print("Do both models output the same tensors?" , "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 705
|
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Build a k_size x k_size Gaussian kernel centered on the middle pixel."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    """Convolve `image` with a Gaussian kernel via an im2col matrix product."""
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
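# Hedged validation idea (added, not in the original): the hand-rolled filter
# should roughly agree with OpenCV's implementation on the image interior
# (borders differ because this version crops instead of padding).
# import cv2
# reference = cv2.GaussianBlur(gray, (3, 3), sigmaX=1)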
| 643
| 0
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels
    def get_config( self ):
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model( self , config , input_values , labels ):
        """simple docstring"""
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="AST does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio():
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest( unittest.TestCase):
'''simple docstring'''
    @cached_property
    def default_feature_extractor( self ):
        """simple docstring"""
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification( self ):
        """simple docstring"""
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(torch_device )
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 706
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """simple docstring"""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
    disable = False
    if main_process_only:
        # only the local main process should render a bar; disable it elsewhere
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
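# Hedged usage sketch (added): wrapping an iterable so only the local main
# process renders a progress bar; `dataloader` is an illustrative name.
# for batch in tqdm(True, dataloader, desc="train"):
#     ...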
| 643
| 0
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """simple docstring"""
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """simple docstring"""
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """simple docstring"""
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """simple docstring"""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """simple docstring"""
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """simple docstring"""
    config = ReformerConfig.from_json_file(config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , "rb" ) as f:
        model_weights = pickle.load(f )["weights"]
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 707
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer( PreTrainedTokenizer):
'''simple docstring'''
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__( self , sentencepiece_model_ckpt , vocab_file=None , do_lower_case=False , encoding="utf8" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(sentencepiece_model_ckpt )
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file )
        else:
            self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping( self , text ):
        """simple docstring"""
        if text is None:
            return None
        split_tokens = self.tokenize(text )
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text ):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch )
            else:
                ch = unicodedata.normalize("NFKC" , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token ) + offset
            end = start + len(token )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            offset = end
        return token_mapping
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.vocab )
    def get_vocab( self ):
        """simple docstring"""
        return dict(self.vocab , **self.added_tokens_encoder )
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )
    def _lowercase ( self , text ):
        """simple docstring"""
        return "".join((self.SP_CHAR_MAPPING.get(c , c ) for c in text) )
    def _tokenize( self , text , enable_sampling=False , nbest_size=64 , alpha=0.1 ):
        """simple docstring"""
        if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('''alpha''' ) is not None:
            alpha = self.sp_model_kwargs.get('''alpha''' )
        if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
            nbest_size = self.sp_model_kwargs.get('''nbest_size''' )
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text )
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text , nbest_size , alpha )
        new_pieces = []
        for pi, piece in enumerate(pieces ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE )
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk ) or self.is_punct(chunk ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(chunk )
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
            if len(piece ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def convert_ids_to_string( self , ids ):
        """simple docstring"""
        tokens = self.convert_ids_to_tokens(ids )
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.reverse_vocab.get(index , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens( self , offset_mapping_0 , offset_mapping_1=None ):
        """simple docstring"""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0 ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0 ) + 1) + [1] * (len(token_ids_1 ) + 3)
    def is_ch_char( self , char ):
        """simple docstring"""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def is_alpha( self , char ):
        """simple docstring"""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def is_punct( self , char ):
        """simple docstring"""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def is_whitespace( self , char ):
        """simple docstring"""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
            if cat == "Zs":
                return True
        return False
    def load_vocab( self , filepath ):
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath , '''r''' , encoding='''utf-8''' ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip('''\n''' )
                token_to_idx[token] = int(index )
        return token_to_idx
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(token + '''\n''' )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , '''sentencepiece.bpe.model''' )
        with open(tokenizer_model_file , '''wb''' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
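# Usage sketch (hedged): in the original transformers source the class above is
# ErnieMTokenizer; the checkpoint id below is the one named in the URL maps at the
# top of this file, and loading it fetches the vocab and SentencePiece files.
# from transformers import ErnieMTokenizer
# tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
# encoded = tokenizer("Hello world")
# # single sequences come out as [CLS] X [SEP]; pairs as [CLS] A [SEP] [SEP] B [SEP],
# # matching the special-token layout implemented above.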
| 643
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Dict = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : str = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
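# Behavioural sketch of the _LazyModule pattern above (names assume the original
# transformers package layout): importing the package itself is cheap, and each
# heavy submodule is only imported on first attribute access.
# import transformers.models.xlm_roberta as xlm_roberta   # nothing heavy imported yet
# config_cls = xlm_roberta.XLMRobertaConfig               # loads only the configuration submodule
# model_cls = xlm_roberta.XLMRobertaModel                 # loads the torch modeling submodule on demand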
| 708
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__UpperCAmelCase : List[str] = "UperNetConfig"
class UperNetConvModule( nn.Module):
    '''simple docstring'''
    def __init__( self , in_channels , out_channels , kernel_size , padding = 0 , bias = False , dilation = 1 , ):
        """simple docstring"""
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , padding=padding , bias=bias , dilation=dilation , )
        self.batch_norm = nn.BatchNorm2d(out_channels )
        self.activation = nn.ReLU()
    def forward( self , input ):
        """simple docstring"""
        output = self.conv(input )
        output = self.batch_norm(output )
        output = self.activation(output )
        return output
class UperNetPyramidPoolingBlock( nn.Module):
    '''simple docstring'''
    def __init__( self , pool_scale , in_channels , channels ):
        """simple docstring"""
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale ),
            UperNetConvModule(in_channels , channels , kernel_size=1 ),
        ]
        for i, layer in enumerate(self.layers ):
            self.add_module(str(i ) , layer )
    def forward( self , input ):
        """simple docstring"""
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class UperNetPyramidPoolingModule( nn.Module):
    '''simple docstring'''
    def __init__( self , pool_scales , align_corners , in_channels , channels ):
        """simple docstring"""
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales ):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale , in_channels=in_channels , channels=channels )
            self.blocks.append(block )
            self.add_module(str(i ) , block )
    def forward( self , x ):
        """simple docstring"""
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x )
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
            ppm_outs.append(upsampled_ppm_out )
        return ppm_outs
class UperNetHead( nn.Module):
    '''simple docstring'''
    def __init__( self , config , in_channels ):
        """simple docstring"""
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
            fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(l_conv )
            self.fpn_convs.append(fpn_conv )
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    def init_weights( self ):
        """simple docstring"""
        self.apply(self._init_weights )
    def _init_weights( self , module ):
        """simple docstring"""
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward( self , inputs ):
        """simple docstring"""
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x ) )
        psp_outs = torch.cat(psp_outs , dim=1 )
        output = self.bottleneck(psp_outs )
        return output
    def forward( self , encoder_hidden_states ):
        """simple docstring"""
        laterals = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(encoder_hidden_states ) )
        # build top-down path
        used_backbone_levels = len(laterals )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=prev_shape , mode='''bilinear''' , align_corners=self.align_corners )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
        fpn_outs = torch.cat(fpn_outs , dim=1 )
        output = self.fpn_bottleneck(fpn_outs )
        output = self.classifier(output )
        return output
class UperNetFCNHead( nn.Module):
    '''simple docstring'''
    def __init__( self , config , in_index = 2 , kernel_size = 3 , dilation = 1 ):
        """simple docstring"""
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs )
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
    def init_weights( self ):
        """simple docstring"""
        self.apply(self._init_weights )
    def _init_weights( self , module ):
        """simple docstring"""
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def forward( self , encoder_hidden_states ):
        """simple docstring"""
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states )
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        output = self.classifier(output )
        return output
class UperNetPreTrainedModel( PreTrainedModel):
    '''simple docstring'''
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        """simple docstring"""
        if isinstance(module , UperNetPreTrainedModel ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    def init_weights( self ):
        """simple docstring"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    def _set_gradient_checkpointing( self , module , value=False ):
        """simple docstring"""
        if isinstance(module , BackboneMixin ):
            module.gradient_checkpointing = value
__UpperCAmelCase : List[Any] = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase : Union[str, Any] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.", _a, )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
UpperCamelCase : int = UperNetHead(__SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels )
UpperCamelCase : int = UperNetFCNHead(__SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values = None , output_attentions = None , output_hidden_states = None , labels = None , return_dict = None , ):
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps
        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=False )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('''The number of labels should be greater than one''' )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
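# End-to-end sketch (hedged; weights are fetched from the Hub, and `image` is a
# PIL image you supply yourself):
# from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
# processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
# model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
# inputs = processor(images=image, return_tensors="pt")
# logits = model(**inputs).logits   # (batch, num_labels, height, width), upsampled to the input size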
| 643
| 0
|
import os
def solution():
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , '''triangle.txt''' )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(''' ''' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number_a = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a , number_b )
    return max(a[-1] )
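# Self-contained illustration of the same bottom-up recurrence on an in-memory
# triangle (no triangle.txt required): each cell absorbs the larger of its two
# parents, so the best path ends at the maximum of the last row.
def _max_path_sum(triangle):
    rows = [row[:] for row in triangle]  # copy so the caller's triangle is not mutated
    for i in range(1, len(rows)):
        for j in range(len(rows[i])):
            above_right = rows[i - 1][j] if j != len(rows[i - 1]) else 0
            above_left = rows[i - 1][j - 1] if j > 0 else 0
            rows[i][j] += max(above_right, above_left)
    return max(rows[-1])
assert _max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23  # Project Euler's worked example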
if __name__ == "__main__":
print(solution())
| 709
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
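# `get_duration` comes from a local utils module that is not part of this file.
# A minimal stand-in with the same shape would look like this (an assumption,
# not the project's actual implementation):
# import functools, time
# def get_duration(func):
#     @functools.wraps(func)
#     def wrapper(*args, **kwargs):
#         start = time.perf_counter()
#         func(*args, **kwargs)
#         return time.perf_counter() - start   # elapsed seconds, stored into `times` below
#     return wrapper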
@get_duration
def map(dataset: datasets.Dataset , **kwargs ):  # intentionally shadows the built-in; called below
    """simple docstring"""
    _ = dataset.map(**kwargs )
@get_duration
def filter(dataset: datasets.Dataset , **kwargs ):  # intentionally shadows the built-in; called below
    """simple docstring"""
    _ = dataset.filter(**kwargs )
def benchmark_map_filter():
    """simple docstring"""
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples['''text'''] )
        times['''map identity'''] = map(dataset )
        times['''map identity batched'''] = map(dataset , batched=True )
        times['''map no-op batched'''] = map(dataset , function=lambda examples : None , batched=True )
        with dataset.formatted_as(type='''numpy''' ):
            times['''map no-op batched numpy'''] = map(dataset , function=lambda examples : None , batched=True )
        with dataset.formatted_as(type='''pandas''' ):
            times['''map no-op batched pandas'''] = map(dataset , function=lambda examples : None , batched=True )
        with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
            times['''map no-op batched pytorch'''] = map(dataset , function=lambda examples : None , batched=True )
        with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
            times['''map no-op batched tensorflow'''] = map(dataset , function=lambda examples : None , batched=True )
        times['''map fast-tokenizer batched'''] = map(dataset , function=tokenize , batched=True )
        times['''filter'''] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 643
| 0
|
from __future__ import annotations
class XORCipher :
    '''simple docstring'''
    def __init__( self , key = 0 ):
        """simple docstring"""
        self.__key = key
    def encrypt( self , content , key ):
        """simple docstring"""
        assert isinstance(content , str ) and isinstance(key , int )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def decrypt( self , content , key ):
        """simple docstring"""
        assert isinstance(content , str ) and isinstance(key , int )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def encrypt_string( self , content , key = 0 ):
        """simple docstring"""
        assert isinstance(content , str ) and isinstance(key , int )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def decrypt_string( self , content , key = 0 ):
        """simple docstring"""
        assert isinstance(content , str ) and isinstance(key , int )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def encrypt_file( self , file , key = 0 ):
        """simple docstring"""
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('''encrypt.out''' , '''w+''' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True
    def decrypt_file( self , file , key ):
        """simple docstring"""
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open('''decrypt.out''' , '''w+''' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 710
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
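# Usage sketch (hedged; the checkpoint id is an assumption and weights are
# downloaded from the Hub on first use):
# import torch
# from diffusers import ShapEPipeline
# pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")
# images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64, frame_size=256).images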
| 643
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester( unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , do_convert_rgb=True , ):
        """simple docstring"""
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs( self , equal_resolution=False , numpify=False , torchify=False ):
        """simple docstring"""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size ):
                width , height = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x ) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest( ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_convert_rgb''' ) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
UpperCamelCase : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : str = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCamelCase : Optional[Any] = image_processing(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
UpperCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCamelCase : int = image_processing(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
UpperCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCamelCase : Optional[Any] = image_processing(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels( ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True )
        self.expected_encoded_image_num_channels = 3
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_convert_rgb''' ) )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
UpperCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCamelCase : Tuple = image_processing(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 711
|
import torch
from transformers import AutoModel
class UpperCAmelCase_ ( torch.nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self ).__init__()
UpperCamelCase : List[str] = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
UpperCamelCase : List[Any] = torch.nn.Softmax(dim=1 )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
    def forward( self , W_query , W_supports ):
        """simple docstring"""
        support_sizes = W_supports['''sizes'''].tolist()
        start_token_id = W_supports['''start_token_id'''].item()
        end_token_id = W_supports['''end_token_id'''].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['''input_ids'''] == start_token_id
        end_token_masks = W_supports['''input_ids'''] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
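# Input-shape sketch (hedged, based on the original fsner project): both forward
# arguments are tokenized batches, and the support batch carries three extra
# tensors that the method pops before the BERT pass.
# W_supports = {"input_ids": ..., "attention_mask": ...,
#               "sizes": torch.tensor([...]),            # number of support examples per query
#               "start_token_id": torch.tensor(s_id),    # id of the entity-start marker token
#               "end_token_id": torch.tensor(e_id)}      # id of the entity-end marker token
# p_starts, p_ends = model(W_query, W_supports)          # span-boundary scores per query token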
| 643
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/reformer-crime-and-punishment": 524288,
}
class UpperCAmelCase_ ( PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE=[] , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
UpperCamelCase : int = vocab_file
UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
    @property
    def vocab_size( self ):
        """simple docstring"""
        return self.sp_model.get_piece_size()
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text ):
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
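# Usage sketch (hedged): the class above is ReformerTokenizer in the original
# source; the checkpoint below is the one listed in PRETRAINED_VOCAB_FILES_MAP.
# from transformers import ReformerTokenizer
# tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# pieces = tokenizer.tokenize("Crime and Punishment")   # SentencePiece pieces such as "▁Crime"
# text = tokenizer.convert_tokens_to_string(pieces)     # round-trips to the whitespace-stripped input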
| 712
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = '''lower newer'''
UpperCamelCase : Union[str, Any] = '''lower newer'''
return input_text, output_text
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.get_tokenizer()
UpperCamelCase : int = '''lower newer'''
UpperCamelCase : Union[str, Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCamelCase : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = tokens + [tokenizer.unk_token]
UpperCamelCase : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
    def _lowercase ( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''' , '''World''' )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , expected_token_type_ids )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCamelCase : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
UpperCamelCase : Optional[int] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCamelCase : str = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
UpperCamelCase : Union[str, Any] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = [tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) for seq in encoding['''input_ids''']]
# fmt: off
UpperCamelCase : int = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
UpperCamelCase : List[str] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __SCREAMING_SNAKE_CASE )
for expected, decoded in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
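# Hedged note (added, not from the source): the test above checks that
# tokenizer.encode(..., add_special_tokens=True) agrees with
# build_inputs_with_special_tokens(), i.e. that DeBERTa wraps a single
# sequence as [CLS] A [SEP] and a pair as [CLS] A [SEP] B [SEP].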
| 643
| 0
|
import logging
from transformers import PretrainedConfig
__UpperCAmelCase : Union[str, Any] = logging.getLogger(__name__)
__UpperCAmelCase : List[str] = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class UpperCAmelCase_ ( PretrainedConfig):
'''simple docstring'''
__UpperCamelCase : List[Any] = "bertabs"
def __init__( self , __SCREAMING_SNAKE_CASE=30_522 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=0.2 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=0.2 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = vocab_size
UpperCamelCase : Optional[Any] = max_pos
UpperCamelCase : List[Any] = enc_layers
UpperCamelCase : List[str] = enc_hidden_size
UpperCamelCase : str = enc_heads
UpperCamelCase : Optional[Any] = enc_ff_size
UpperCamelCase : Any = enc_dropout
UpperCamelCase : int = dec_layers
UpperCamelCase : List[str] = dec_hidden_size
UpperCamelCase : Dict = dec_heads
UpperCamelCase : Any = dec_ff_size
UpperCamelCase : str = dec_dropout
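# Hedged note (added, not from the source): BertAbs couples a BERT encoder
# with a Transformer decoder for abstractive summarization, so the enc_* and
# dec_* keyword groups above size the two halves independently (layers,
# hidden size, heads, feed-forward width and dropout for each side).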
| 713
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase_ ( TestCase):
'''simple docstring'''
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
UpperCamelCase : str = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
UpperCamelCase : Optional[int] = input_file.read()
UpperCamelCase : Union[str, Any] = regexp.search(__SCREAMING_SNAKE_CASE )
return match
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as input_file:
UpperCamelCase : Optional[int] = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
UpperCamelCase : Tuple = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCamelCase : Dict = regexp.finditer(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = Path('''./datasets''' )
UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__SCREAMING_SNAKE_CASE ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = Path('''./datasets''' )
UpperCamelCase : Tuple = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_print_statements(str(__SCREAMING_SNAKE_CASE ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 643
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : List[Any] = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class UpperCAmelCase_ ( PretrainedConfig):
'''simple docstring'''
__UpperCamelCase : List[str] = 'data2vec-text'
def __init__( self , __SCREAMING_SNAKE_CASE=30_522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
        super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = vocab_size
UpperCamelCase : str = hidden_size
UpperCamelCase : Union[str, Any] = num_hidden_layers
UpperCamelCase : Dict = num_attention_heads
UpperCamelCase : List[str] = hidden_act
UpperCamelCase : int = intermediate_size
UpperCamelCase : Dict = hidden_dropout_prob
UpperCamelCase : Optional[Any] = attention_probs_dropout_prob
UpperCamelCase : List[Any] = max_position_embeddings
UpperCamelCase : Optional[Any] = type_vocab_size
UpperCamelCase : str = initializer_range
UpperCamelCase : Any = layer_norm_eps
UpperCamelCase : List[Any] = position_embedding_type
UpperCamelCase : Tuple = use_cache
UpperCamelCase : Optional[int] = classifier_dropout
class UpperCAmelCase_ ( OnnxConfig):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase : str = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
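# Hedged note (added, not from the source): the dynamic_axis mapping marks
# which input dimensions the ONNX exporter should treat as variable (batch and
# sequence length, plus a choice axis for multiple-choice tasks), so the
# exported graph is not pinned to the shapes used during tracing.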
| 714
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCAmelCase_ :
'''simple docstring'''
__UpperCamelCase : Any = XGLMConfig
__UpperCamelCase : Dict = {}
__UpperCamelCase : List[str] = "gelu"
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=14 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=0.02 , ):
"""simple docstring"""
UpperCamelCase : Any = parent
UpperCamelCase : Optional[int] = batch_size
UpperCamelCase : str = seq_length
UpperCamelCase : List[str] = is_training
UpperCamelCase : Tuple = use_input_mask
UpperCamelCase : Union[str, Any] = use_labels
UpperCamelCase : int = vocab_size
UpperCamelCase : Optional[int] = d_model
UpperCamelCase : Any = num_hidden_layers
UpperCamelCase : List[str] = num_attention_heads
UpperCamelCase : Optional[Any] = ffn_dim
UpperCamelCase : Optional[int] = activation_function
UpperCamelCase : List[str] = activation_dropout
UpperCamelCase : Any = attention_dropout
UpperCamelCase : str = max_position_embeddings
UpperCamelCase : Union[str, Any] = initializer_range
UpperCamelCase : int = None
UpperCamelCase : Dict = 0
UpperCamelCase : int = 2
UpperCamelCase : Any = 1
def _lowercase ( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCamelCase : int = None
if self.use_input_mask:
UpperCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Tuple = self.get_config()
UpperCamelCase : str = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowercase ( self ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__SCREAMING_SNAKE_CASE , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase : List[str] = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__UpperCamelCase : Union[str, Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__UpperCamelCase : Any = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : List[Any] = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = TFXGLMModelTester(self )
UpperCamelCase : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , n_embd=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : List[Any] = TFXGLMModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def _lowercase ( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@slow
def _lowercase ( self , __SCREAMING_SNAKE_CASE=True ):
"""simple docstring"""
UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : List[Any] = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase : str = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
UpperCamelCase : Union[str, Any] = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
UpperCamelCase : Tuple = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
UpperCamelCase : int = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
UpperCamelCase : str = model.generate(__SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , seed=[7, 0] )
UpperCamelCase : Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : Tuple = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCamelCase : Tuple = '''left'''
# use different length sentences to test batching
UpperCamelCase : Any = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
UpperCamelCase : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' , padding=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = inputs['''input_ids''']
UpperCamelCase : Optional[int] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
UpperCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
UpperCamelCase : Optional[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 )
UpperCamelCase : str = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
UpperCamelCase : List[Any] = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_new_tokens=12 )
UpperCamelCase : Any = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence] )
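# Hedged note (added, not from the source): decoder-only models such as XGLM
# need left padding for batched generation; right padding would leave pad
# tokens between the prompt and the newly generated tokens. The batching test
# above therefore checks that padded and unpadded prompts generate the same
# continuations.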
| 643
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase : int = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : str = ['ConvNextFeatureExtractor']
__UpperCAmelCase : Tuple = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Tuple = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
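# Hedged note (added, not from the source): this is the standard lazy-import
# layout -- under TYPE_CHECKING the real symbols are imported so static
# analysis sees them, while at runtime the module is replaced by a _LazyModule
# that resolves attributes on first access and quietly skips backends that are
# not installed.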
| 715
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : int = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : str = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
__UpperCAmelCase : Union[str, Any] = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
__UpperCAmelCase : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class UpperCAmelCase_ ( PreTrainedTokenizerFast):
'''simple docstring'''
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
__UpperCamelCase : Any = MBartTokenizer
__UpperCamelCase : List[int] = []
__UpperCamelCase : List[int] = []
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = vocab_file
UpperCamelCase : List[str] = False if not self.vocab_file else True
UpperCamelCase : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
UpperCamelCase : List[Any] = {
lang_code: self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCamelCase : Dict = src_lang if src_lang is not None else '''en_XX'''
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCamelCase : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowercase ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : str = [self.sep_token_id]
UpperCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCamelCase : List[str] = src_lang
UpperCamelCase : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = tgt_lang_id
return inputs
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = src_lang
UpperCamelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = []
UpperCamelCase : Dict = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = []
UpperCamelCase : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCamelCase : Optional[int] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
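# Hedged usage sketch (added; assumes the class above mirrors transformers'
# MBartTokenizerFast, whose real name is hidden by the renaming):
#   tokenizer = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tokenizer("UN Chief says there is no military solution", return_tensors="pt")
# Note the suffix layout configured above: the source sequence ends with </s>
# followed by the language-code token rather than starting with it.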
| 643
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCAmelCase : List[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCAmelCase : Any = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCAmelCase : Any = {
'''RUCAIBox/mvp''': 1024,
}
class UpperCAmelCase_ ( PreTrainedTokenizerFast):
'''simple docstring'''
__UpperCamelCase : Tuple = VOCAB_FILES_NAMES
__UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Dict = ['''input_ids''', '''attention_mask''']
__UpperCamelCase : Optional[int] = MvpTokenizer
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
            UpperCamelCase : Any = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
UpperCamelCase : List[str] = add_prefix_space
UpperCamelCase : Tuple = pre_tok_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCamelCase : Any = '''post_processor'''
UpperCamelCase : Union[str, Any] = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
UpperCamelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase : Optional[Any] = tuple(state['''sep'''] )
if "cls" in state:
UpperCamelCase : Dict = tuple(state['''cls'''] )
UpperCamelCase : Optional[int] = False
if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
UpperCamelCase : List[str] = add_prefix_space
UpperCamelCase : Dict = True
if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE ) != trim_offsets:
UpperCamelCase : List[Any] = trim_offsets
UpperCamelCase : List[str] = True
if changes_to_apply:
            UpperCamelCase : str = getattr(processors , state.pop('''type''' ) )
UpperCamelCase : List[Any] = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
def _lowercase ( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
UpperCamelCase : List[str] = value
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : List[str] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Any = [self.sep_token_id]
UpperCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
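# Hedged note (added, not from the source): the mask token is wrapped in an
# AddedToken with lstrip enabled so that "<mask>" absorbs the space before it;
# for BPE tokenizers of this family the mask must include the preceding space
# to stand in for a whole word during infilling.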
| 716
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__UpperCAmelCase : Dict = False
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCamelCase : str = torch.manual_seed(0 )
UpperCamelCase : Union[str, Any] = pipe(
image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCamelCase : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : Dict = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
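# Hedged note (added, not from the source): the seeded pipeline run is checked
# by comparing a 3x3 corner slice of the 512x512 output against reference
# pixel values with a loose 1e-2 tolerance -- the usual pattern for
# reproducibility tests that must tolerate minor GPU nondeterminism.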
| 643
| 0
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__UpperCAmelCase : List[Any] = logging.get_logger(__name__)
class UpperCAmelCase_ ( PerceiverImageProcessor):
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
        warnings.warn(
            '''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PerceiverImageProcessor instead.''' , FutureWarning , )
        super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
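# Hedged note (added, not from the source): a standard deprecation shim -- the
# feature extractor subclasses the image processor and only emits a warning,
# so existing imports keep working until the class is removed in v5.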
| 717
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
class UpperCAmelCase_ ( SequenceFeatureExtractor):
'''simple docstring'''
__UpperCamelCase : Any = ["input_features"]
def __init__( self , __SCREAMING_SNAKE_CASE=80 , __SCREAMING_SNAKE_CASE=16_000 , __SCREAMING_SNAKE_CASE=160 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[str] = n_fft
UpperCamelCase : Dict = hop_length
UpperCamelCase : Dict = chunk_length
UpperCamelCase : List[str] = chunk_length * sampling_rate
UpperCamelCase : Dict = self.n_samples // hop_length
UpperCamelCase : str = sampling_rate
UpperCamelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[str] = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
UpperCamelCase : int = log_spec[:, :-1]
UpperCamelCase : int = np.maximum(__SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
UpperCamelCase : Any = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
UpperCamelCase : List[Any] = np.array(__SCREAMING_SNAKE_CASE , np.intaa )
UpperCamelCase : Optional[Any] = []
for vector, length in zip(__SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
UpperCamelCase : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
UpperCamelCase : Optional[int] = padding_value
normed_input_values.append(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase : Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "max_length" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCamelCase : Tuple = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : Union[str, Any] = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
UpperCamelCase : int = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : Optional[int] = [np.asarray([raw_speech] ).T]
UpperCamelCase : Optional[int] = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
UpperCamelCase : Optional[Any] = self.pad(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
UpperCamelCase : Optional[Any] = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
UpperCamelCase : List[str] = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
UpperCamelCase : Dict = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
UpperCamelCase : Tuple = [self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[int] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
else:
UpperCamelCase : Dict = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
UpperCamelCase : Union[str, Any] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
UpperCamelCase : Dict = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCamelCase : List[str] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
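# Hedged note (added, not from the source): this mirrors Whisper-style audio
# preprocessing -- raw audio is padded or truncated to chunk_length *
# sampling_rate samples (30 s at 16 kHz), turned into an 80-bin log10 mel
# spectrogram, clamped to within 8 log10 units of its peak, then rescaled by
# (x + 4) / 4 to land roughly in [-1, 1].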
| 643
| 0
|
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE = 6 ):
"""simple docstring"""
a_ : int = None
a_ : int = None
        self.create_linked_list(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
a_ : Any = Node()
a_ : Dict = current_node
a_ : List[str] = current_node
a_ : int = current_node
        for _ in range(1 , __SCREAMING_SNAKE_CASE ):
a_ : Tuple = Node()
a_ : Optional[int] = current_node
a_ : int = previous_node
a_ : Optional[Any] = current_node
a_ : Any = self.front
a_ : Optional[int] = previous_node
def _lowercase ( self ):
"""simple docstring"""
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _lowercase ( self ):
"""simple docstring"""
self.check_can_perform_operation()
return self.front.data if self.front else None
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
a_ : List[str] = self.rear.next
if self.rear:
a_ : List[str] = data
def _lowercase ( self ):
"""simple docstring"""
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
a_ : List[str] = self.front.data
a_ : Union[str, Any] = None
return data
a_ : Any = self.front
a_ : List[Any] = old_front.next
a_ : Union[str, Any] = old_front.data
a_ : Optional[Any] = None
return data
def _lowercase ( self ):
"""simple docstring"""
if self.is_empty():
raise Exception('''Empty Queue''' )
def _lowercase ( self ):
"""simple docstring"""
if self.rear and self.rear.next == self.front:
raise Exception('''Full Queue''' )
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
a_ : Optional[Any] = None
a_ : int = None
a_ : Tuple = None
if __name__ == "__main__":
import doctest
doctest.testmod()
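# Hedged note (added, not from the source): this fixed-capacity circular queue
# pre-allocates a ring of linked Nodes; the front and rear pointers walk the
# ring, so enqueue and dequeue are O(1) with no allocation after construction,
# and "full" is detected when rear.next catches up with front.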
| 718
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : Dict = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
__UpperCAmelCase : Tuple = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
__UpperCAmelCase : Any = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class UpperCAmelCase_ ( PreTrainedTokenizerFast):
'''simple docstring'''
__UpperCamelCase : str = VOCAB_FILES_NAMES
__UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : int = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : Any = RoFormerTokenizer
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenize_chinese_chars=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , __SCREAMING_SNAKE_CASE ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , __SCREAMING_SNAKE_CASE ) != strip_accents
):
            UpperCamelCase : List[Any] = getattr(normalizers , pre_tok_state.pop('''type''' ) )
UpperCamelCase : Optional[int] = do_lower_case
UpperCamelCase : Optional[Any] = strip_accents
UpperCamelCase : List[Any] = pre_tok_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = do_lower_case
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = self.__dict__.copy()
UpperCamelCase : Any = BertPreTokenizer()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = d
UpperCamelCase : List[str] = self.__dict__['''_tokenizer'''].get_vocab()
UpperCamelCase : Any = PreTokenizer.custom(JiebaPreTokenizer(__SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : List[Any] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Any = BertPreTokenizer()
return super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
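# Hedged note (added, not from the source): PreTokenizer.custom(...) wraps a
# Python object the Rust tokenizer cannot serialize, which is why
# __getstate__/save_pretrained temporarily swap in a plain BertPreTokenizer
# and __setstate__ restores the Jieba-based pre-tokenizer afterwards.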
| 643
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Tuple = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCAmelCase_ ( PretrainedConfig):
'''simple docstring'''
__UpperCamelCase : Dict = "van"
def __init__( self , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[64, 128, 320, 512] , __SCREAMING_SNAKE_CASE=[3, 3, 12, 3] , __SCREAMING_SNAKE_CASE=[8, 8, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=1e-2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
        super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = image_size
UpperCamelCase : int = num_channels
UpperCamelCase : Dict = patch_sizes
UpperCamelCase : int = strides
UpperCamelCase : List[str] = hidden_sizes
UpperCamelCase : Optional[int] = depths
UpperCamelCase : Any = mlp_ratios
UpperCamelCase : List[Any] = hidden_act
UpperCamelCase : Tuple = initializer_range
UpperCamelCase : List[str] = layer_norm_eps
UpperCamelCase : Tuple = layer_scale_init_value
UpperCamelCase : Optional[Any] = drop_path_rate
UpperCamelCase : Optional[int] = dropout_rate
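# Hedged note (added, not from the source): VAN is a four-stage hierarchical
# backbone, so patch_sizes and strides set the per-stage downsampling while
# hidden_sizes, depths and mlp_ratios size each stage -- one list entry per stage.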
| 719
|
from __future__ import annotations
def a ( SCREAMING_SNAKE_CASE_ : list[int] ):
    """Sort a list of integers in place with pigeonhole sort and return it."""
    if len(SCREAMING_SNAKE_CASE_ ) == 0:
        return SCREAMING_SNAKE_CASE_
    _min , _max = min(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ )
    # Compute the number of pigeonholes needed to cover the value range.
    holes_range = _max - _min + 1
    holes , holes_repeat = [0] * holes_range , [0] * holes_range
    # Drop every element into its hole and count repeats.
    for i in SCREAMING_SNAKE_CASE_:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Put the numbers back into the array in sorted order.
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            SCREAMING_SNAKE_CASE_[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return SCREAMING_SNAKE_CASE_
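# Hedged note (added, not from the source): pigeonhole sort runs in O(n + k)
# time with O(k) extra space, where k = max - min + 1, so it only pays off
# when the key range is comparable to the number of elements.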
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase : Any = input("Enter numbers separated by comma:\n")
__UpperCAmelCase : int = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 643
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__UpperCAmelCase : List[Any] = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class UpperCAmelCase_ ( PretrainedConfig):
'''simple docstring'''
__UpperCamelCase : List[Any] = '''albert'''
def __init__( self , __SCREAMING_SNAKE_CASE=30_000 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=4_096 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=16_384 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE="gelu_new" , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
        super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = vocab_size
UpperCamelCase : str = embedding_size
UpperCamelCase : List[Any] = hidden_size
UpperCamelCase : Union[str, Any] = num_hidden_layers
UpperCamelCase : Dict = num_hidden_groups
UpperCamelCase : List[str] = num_attention_heads
UpperCamelCase : List[Any] = inner_group_num
UpperCamelCase : Optional[int] = hidden_act
UpperCamelCase : Any = intermediate_size
UpperCamelCase : Tuple = hidden_dropout_prob
UpperCamelCase : Dict = attention_probs_dropout_prob
UpperCamelCase : str = max_position_embeddings
UpperCamelCase : Union[str, Any] = type_vocab_size
UpperCamelCase : str = initializer_range
UpperCamelCase : Tuple = layer_norm_eps
UpperCamelCase : Optional[Any] = classifier_dropout_prob
UpperCamelCase : Any = position_embedding_type
class UpperCAmelCase_ ( OnnxConfig):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCamelCase : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
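# Hedged note (added, not from the source): ALBERT's two parameter-reduction
# tricks are visible in this config -- embedding_size (128) is factorized away
# from hidden_size (4096), and num_hidden_groups / inner_group_num implement
# cross-layer parameter sharing across the 12 layers.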
| 720
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : List[Any] = True
except ImportError:
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def a ( SCREAMING_SNAKE_CASE_ : Namespace ):
    """simple docstring"""
    return UpperCAmelCase_(SCREAMING_SNAKE_CASE_.testing , SCREAMING_SNAKE_CASE_.testing_file , path=SCREAMING_SNAKE_CASE_.path )
class UpperCAmelCase_ ( BaseTransformersCLICommand):
'''simple docstring'''
@staticmethod
def _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[Any] = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=__SCREAMING_SNAKE_CASE , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=__SCREAMING_SNAKE_CASE , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = testing
UpperCamelCase : Any = testing_file
UpperCamelCase : Dict = path
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCamelCase : List[str] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
UpperCamelCase : Dict = (
Path(__SCREAMING_SNAKE_CASE ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCamelCase : List[Any] = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(__SCREAMING_SNAKE_CASE ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__SCREAMING_SNAKE_CASE , extra_context=__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = configuration['''lowercase_modelname''']
UpperCamelCase : int = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(f"""{directory}/configuration.json""" )
UpperCamelCase : str = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Any = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Union[str, Any] = '''Flax''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Optional[Any] = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=__SCREAMING_SNAKE_CASE )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
UpperCamelCase : Any = f.readlines()
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(__SCREAMING_SNAKE_CASE )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Create temp file
UpperCamelCase , UpperCamelCase : Optional[Any] = mkstemp()
UpperCamelCase : Tuple = False
with fdopen(__SCREAMING_SNAKE_CASE , '''w''' ) as new_file:
with open(__SCREAMING_SNAKE_CASE ) as old_file:
for line in old_file:
new_file.write(__SCREAMING_SNAKE_CASE )
if line_to_copy_below in line:
UpperCamelCase : Optional[int] = True
for line_to_copy in lines_to_copy:
new_file.write(__SCREAMING_SNAKE_CASE )
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Remove original file
remove(__SCREAMING_SNAKE_CASE )
# Move new file
move(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def skip_units(__SCREAMING_SNAKE_CASE ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE ) as datafile:
UpperCamelCase : int = []
UpperCamelCase : Dict = False
UpperCamelCase : List[Any] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCamelCase : Dict = line.split('''"''' )[1]
UpperCamelCase : int = skip_units(__SCREAMING_SNAKE_CASE )
elif "# Below: " in line and "##" not in line:
UpperCamelCase : Dict = line.split('''"''' )[1]
UpperCamelCase : List[str] = skip_units(__SCREAMING_SNAKE_CASE )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = []
elif "# Replace with" in line and "##" not in line:
UpperCamelCase : Tuple = []
elif "##" not in line:
lines_to_copy.append(__SCREAMING_SNAKE_CASE )
remove(__SCREAMING_SNAKE_CASE )
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(__SCREAMING_SNAKE_CASE )
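# Hedged usage note (not part of the original file): once registered on the
# transformers CLI, the command above would be invoked roughly as
#   transformers-cli add-new-model --testing --testing_file config.json
# although, per the deprecation warning it emits, `add-new-model-like` is the
# maintained replacement.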
| 643
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__UpperCAmelCase : List[str] = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : Optional[int] = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase : Any = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
__UpperCAmelCase : str = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE):
'''simple docstring'''
__UpperCamelCase : List[Any] = VOCAB_FILES_NAMES
__UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : Dict = ['input_ids', 'attention_mask']
__UpperCamelCase : Optional[int] = DistilBertTokenizer
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="[UNK]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="[PAD]" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , tokenize_chinese_chars=_a , strip_accents=_a , **_a , )
UpperCamelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _a ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _a ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _a ) != tokenize_chinese_chars
):
UpperCamelCase : Tuple = getattr(_a , normalizer_state.pop('''type''' ) )
UpperCamelCase : Optional[Any] = do_lower_case
UpperCamelCase : str = strip_accents
UpperCamelCase : Union[str, Any] = tokenize_chinese_chars
UpperCamelCase : Optional[int] = normalizer_class(**_a )
UpperCamelCase : Optional[Any] = do_lower_case
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
UpperCamelCase : str = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : Optional[int] = [self.sep_token_id]
UpperCamelCase : List[str] = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : List[str] = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
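# Hedged usage sketch (not part of the original file): loading the public fast
# tokenizer this class corresponds to and encoding a sentence pair. The
# checkpoint name and network access are assumptions here.
if __name__ == "__main__":
from transformers import DistilBertTokenizerFast
tokenizer = DistilBertTokenizerFast.from_pretrained('''distilbert-base-uncased''')
encoded = tokenizer('''Hello world''', '''How are you?''')
print(encoded['''input_ids''']) # [CLS] ... [SEP] ... [SEP] ids
print(encoded['''attention_mask''']) # all ones since no padding was requested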
| 721
|
from pathlib import Path
import cv2 as cva  # OpenCV; aliased so the existing cva.* calls below still resolve
import numpy as np
from matplotlib import pyplot as plt
def a ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : str = cva.getAffineTransform(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return cva.warpAffine(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (rows, cols) )
if __name__ == "__main__":
# read original image
__UpperCAmelCase : Tuple = cva.imread(
str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
)
# turn image in gray scale value
__UpperCAmelCase : int = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__UpperCAmelCase , __UpperCAmelCase : Tuple = gray_img.shape
# set different points to rotate image
__UpperCAmelCase : Optional[int] = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
__UpperCAmelCase : Optional[int] = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
__UpperCAmelCase : Any = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
__UpperCAmelCase : int = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
# add all rotated images in a list
__UpperCAmelCase : Union[str, Any] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__UpperCAmelCase : List[str] = plt.figure(1)
__UpperCAmelCase : Dict = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 643
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
__UpperCAmelCase : Dict = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
__UpperCAmelCase : Dict = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def a ( ):
"""simple docstring"""
UpperCamelCase : List[Any] = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
UpperCamelCase : Optional[int] = bs[:]
UpperCamelCase : List[Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCamelCase )
cs.append(2**8 + n )
n += 1
UpperCamelCase : int = [chr(_lowerCamelCase ) for n in cs]
return dict(zip(_lowerCamelCase , _lowerCamelCase ) )
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase : Optional[int] = set()
UpperCamelCase : str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase : List[Any] = char
return pairs
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[Any] = VOCAB_FILES_NAMES
__UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : str = ["input_ids", "attention_mask"]
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
UpperCamelCase : List[str] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
UpperCamelCase : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
UpperCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
UpperCamelCase : List[str] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
UpperCamelCase : Optional[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
with open(UpperCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
UpperCamelCase : List[str] = json.load(UpperCamelCase__ )
UpperCamelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
UpperCamelCase : List[Any] = errors # how to handle errors in decoding
UpperCamelCase : int = bytes_to_unicode()
UpperCamelCase : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase__ , encoding='''utf-8''' ) as merges_handle:
UpperCamelCase : Tuple = merges_handle.read().split('''\n''' )[1:-1]
UpperCamelCase : str = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase : Dict = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
UpperCamelCase : Optional[int] = {}
UpperCamelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase : str = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def _lowercase ( self ):
"""simple docstring"""
return len(self.encoder )
def _lowercase ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase : str = tuple(UpperCamelCase__ )
UpperCamelCase : Any = get_pairs(UpperCamelCase__ )
if not pairs:
return token
while True:
UpperCamelCase : Dict = min(UpperCamelCase__ , key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(UpperCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase : str = bigram
UpperCamelCase : Tuple = []
UpperCamelCase : int = 0
while i < len(UpperCamelCase__ ):
try:
UpperCamelCase : Union[str, Any] = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase : Optional[Any] = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase : int = tuple(UpperCamelCase__ )
UpperCamelCase : Optional[Any] = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
UpperCamelCase : Dict = get_pairs(UpperCamelCase__ )
UpperCamelCase : Union[str, Any] = ''' '''.join(UpperCamelCase__ )
UpperCamelCase : Dict = word
return word
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = []
for token in re.findall(self.pat , UpperCamelCase__ ):
UpperCamelCase : int = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.decoder.get(UpperCamelCase__ )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = ''''''.join(UpperCamelCase__ )
UpperCamelCase : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase : List[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '''\n''' )
UpperCamelCase : List[str] = 0
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
UpperCamelCase : str = token_index
writer.write(''' '''.join(UpperCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
UpperCamelCase : Tuple = [self.cls_token_id]
UpperCamelCase : Dict = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=UpperCamelCase__ , token_ids_1=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0 )) + [1]
return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
UpperCamelCase : str = [self.sep_token_id]
UpperCamelCase : int = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase__ ) > 0 and not text[0].isspace()):
UpperCamelCase : Tuple = ''' ''' + text
return (text, kwargs)
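# Hedged usage sketch (not part of the original file): the byte-level BPE above
# backs the public LongformerTokenizer; tokenizing splits on the regex, maps
# bytes to printable characters, then applies the learned merges. The
# checkpoint name is an assumption.
if __name__ == "__main__":
from transformers import LongformerTokenizer
tokenizer = LongformerTokenizer.from_pretrained('''allenai/longformer-base-4096''')
print(tokenizer.tokenize('''Hello world!''')) # e.g. ['Hello', 'Ġworld', '!']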
| 700
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "conditional_detr"
__UpperCamelCase : Optional[Any] = ["past_key_values"]
__UpperCamelCase : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=300 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_048 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="sine" , __SCREAMING_SNAKE_CASE="resnet50" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.25 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCamelCase : str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = backbone_config.get('''model_type''' )
UpperCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase : Any = config_class.from_dict(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = use_timm_backbone
UpperCamelCase : int = backbone_config
UpperCamelCase : Any = num_channels
UpperCamelCase : Optional[Any] = num_queries
UpperCamelCase : Tuple = d_model
UpperCamelCase : Optional[Any] = encoder_ffn_dim
UpperCamelCase : Optional[int] = encoder_layers
UpperCamelCase : Union[str, Any] = encoder_attention_heads
UpperCamelCase : Optional[Any] = decoder_ffn_dim
UpperCamelCase : Optional[int] = decoder_layers
UpperCamelCase : Optional[Any] = decoder_attention_heads
UpperCamelCase : Any = dropout
UpperCamelCase : List[Any] = attention_dropout
UpperCamelCase : List[Any] = activation_dropout
UpperCamelCase : List[str] = activation_function
UpperCamelCase : Optional[int] = init_std
UpperCamelCase : Optional[Any] = init_xavier_std
UpperCamelCase : Union[str, Any] = encoder_layerdrop
UpperCamelCase : Optional[Any] = decoder_layerdrop
UpperCamelCase : Tuple = encoder_layers
UpperCamelCase : Optional[Any] = auxiliary_loss
UpperCamelCase : Union[str, Any] = position_embedding_type
UpperCamelCase : Optional[int] = backbone
UpperCamelCase : Dict = use_pretrained_backbone
UpperCamelCase : Tuple = dilation
# Hungarian matcher
UpperCamelCase : Union[str, Any] = class_cost
UpperCamelCase : List[Any] = bbox_cost
UpperCamelCase : Optional[Any] = giou_cost
# Loss coefficients
UpperCamelCase : Optional[Any] = mask_loss_coefficient
UpperCamelCase : Optional[int] = dice_loss_coefficient
UpperCamelCase : Optional[Any] = cls_loss_coefficient
UpperCamelCase : Optional[int] = bbox_loss_coefficient
UpperCamelCase : Optional[int] = giou_loss_coefficient
UpperCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def _lowercase ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _lowercase ( self ):
"""simple docstring"""
return self.d_model
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
UpperCamelCase : List[Any] = self.backbone_config.to_dict()
UpperCamelCase : List[Any] = self.__class__.model_type
return output
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Dict = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-5
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
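# Hedged usage sketch (not part of the original file): instantiating the public
# ConditionalDetrConfig this class corresponds to; the attribute_map above
# routes the generic `hidden_size` attribute to `d_model`.
if __name__ == "__main__":
from transformers import ConditionalDetrConfig
config = ConditionalDetrConfig(num_queries=300, d_model=256)
print(config.hidden_size) # 256, resolved through attribute_map to d_model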
| 643
| 0
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="resnet50" , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = parent
UpperCamelCase : Optional[int] = out_indices if out_indices is not None else [4]
UpperCamelCase : List[str] = stage_names
UpperCamelCase : Tuple = out_features
UpperCamelCase : List[Any] = backbone
UpperCamelCase : Union[str, Any] = batch_size
UpperCamelCase : List[str] = image_size
UpperCamelCase : Dict = num_channels
UpperCamelCase : Union[str, Any] = use_pretrained_backbone
UpperCamelCase : Dict = is_training
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : Optional[int] = self.get_config()
return config, pixel_values
def _lowercase ( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = TimmBackbone(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase : Dict = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase : List[Any] = config_and_inputs
UpperCamelCase : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class UpperCAmelCase_ ( snake_case_, snake_case_, snake_case_, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : str = (TimmBackbone,) if is_torch_available() else ()
__UpperCamelCase : int = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : str = False
__UpperCamelCase : int = False
__UpperCamelCase : str = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = TimmBackboneModelTester(self )
UpperCamelCase : Dict = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = '''resnet18'''
UpperCamelCase : List[Any] = '''microsoft/resnet-18'''
UpperCamelCase : Union[str, Any] = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE , use_timm_backbone=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
UpperCamelCase : List[Any] = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE , use_timm_backbone=__SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] )
UpperCamelCase : Any = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Tuple = [*signature.parameters.keys()]
UpperCamelCase : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Tuple = True
UpperCamelCase : int = self.has_attentions
# no need to test all models as different heads yield the same functionality
UpperCamelCase : Tuple = self.all_model_classes[0]
UpperCamelCase : int = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : str = model(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = outputs[0][-1]
# Encoder-/Decoder-only models
UpperCamelCase : Optional[Any] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
UpperCamelCase : List[str] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Tuple = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : Union[str, Any] = model(**__SCREAMING_SNAKE_CASE )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
UpperCamelCase : Tuple = copy.deepcopy(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : Dict = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : List[Any] = model(**__SCREAMING_SNAKE_CASE )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
UpperCamelCase : Dict = copy.deepcopy(__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = False
UpperCamelCase : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase : List[str] = model(**__SCREAMING_SNAKE_CASE )
| 701
|
import requests
from bs4 import BeautifulSoup
def a ( SCREAMING_SNAKE_CASE_ : str = "AAPL" ):
"""simple docstring"""
UpperCamelCase : Dict = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
UpperCamelCase : Any = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE_ ).text , '''html.parser''' )
UpperCamelCase : Dict = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
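# Hedged alternative sketch (not part of the original file): scraping by a
# generated CSS class string is brittle. Yahoo has also served live quotes in
# a fin-streamer tag, targeted below; the tag and attribute names are
# assumptions about the page markup and may change at any time.
def stock_price_by_streamer(symbol: str = '''AAPL''') -> str:
soup = BeautifulSoup(requests.get(f"""https://finance.yahoo.com/quote/{symbol}""").text, '''html.parser''')
node = soup.find('''fin-streamer''', attrs={'''data-symbol''': symbol, '''data-field''': '''regularMarketPrice'''})
return node.text if node is not None else '''<price not found>'''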
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 643
| 0
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class UpperCAmelCase_ ( _UpperCAmelCase):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = 0
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Any = 3.0
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=lowercase__ ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
AcceleratorState._reset_state()
UpperCamelCase : List[Any] = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fp16 )
UpperCamelCase : List[str] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
self.assertEqual(scaler._enabled , lowercase__ )
@require_multi_gpu
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(lowercase__ , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase : List[Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
__UpperCAmelCase : Optional[int] = Accelerator(kwargs_handlers=[ddp_scaler])
__UpperCAmelCase : Optional[Any] = torch.nn.Linear(100, 200)
__UpperCAmelCase : Tuple = accelerator.prepare(model)
# Check the values changed in kwargs
__UpperCAmelCase : Union[str, Any] = ""
__UpperCAmelCase : str = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 702
|
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
if number > 0:
raise ValueError('''input must be a negative integer''' )
UpperCamelCase : List[str] = len(bin(SCREAMING_SNAKE_CASE_ )[3:] )
UpperCamelCase : List[str] = bin(abs(SCREAMING_SNAKE_CASE_ ) - (1 << binary_number_length) )[3:]
UpperCamelCase : Dict = (
(
'''1'''
+ '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE_ ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
| 0
|
from __future__ import annotations
def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase : Union[str, Any] = set(SCREAMING_SNAKE_CASE_ ), [start]
while stack:
UpperCamelCase : Optional[int] = stack.pop()
explored.add(SCREAMING_SNAKE_CASE_ )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(SCREAMING_SNAKE_CASE_ )
return explored
__UpperCAmelCase : Optional[int] = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "yolos"
def __init__( self , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=[512, 864] , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : int = num_attention_heads
UpperCamelCase : Dict = intermediate_size
UpperCamelCase : Dict = hidden_act
UpperCamelCase : int = hidden_dropout_prob
UpperCamelCase : Any = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : int = image_size
UpperCamelCase : Any = patch_size
UpperCamelCase : str = num_channels
UpperCamelCase : str = qkv_bias
UpperCamelCase : Tuple = num_detection_tokens
UpperCamelCase : List[Any] = use_mid_position_embeddings
UpperCamelCase : Dict = auxiliary_loss
# Hungarian matcher
UpperCamelCase : Optional[Any] = class_cost
UpperCamelCase : Union[str, Any] = bbox_cost
UpperCamelCase : Any = giou_cost
# Loss coefficients
UpperCamelCase : List[Any] = bbox_loss_coefficient
UpperCamelCase : Union[str, Any] = giou_loss_coefficient
UpperCamelCase : Dict = eos_coefficient
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-4
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
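# Hedged usage sketch (not part of the original file): instantiating the
# public YolosConfig this class corresponds to; the keyword values echo the
# defaults above.
if __name__ == "__main__":
from transformers import YolosConfig
config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
print(config.num_detection_tokens, config.patch_size) # 100 16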
| 643
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase : List[str] = logging.get_logger(__name__)
def a ( SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
UpperCamelCase : int = 'huggingface/label-files'
UpperCamelCase : Tuple = 'imagenet-1k-id2label.json'
UpperCamelCase : Dict = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase : Dict = {int(k ): v for k, v in idalabel.items()}
UpperCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase : Dict = 'std_conv' if 'bit' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCamelCase : Union[str, Any] = BitConfig(
conv_layer=__lowercase , num_labels=1_000 , id2label=__lowercase , label2id=__lowercase , )
return config
def a ( SCREAMING_SNAKE_CASE_ : Dict ):
"""simple docstring"""
if "stem.conv" in name:
UpperCamelCase : str = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
UpperCamelCase : Optional[Any] = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
UpperCamelCase : Any = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
UpperCamelCase : Tuple = 'bit.' + name
if "bit" not in name and "classifier" not in name:
UpperCamelCase : List[Any] = 'bit.encoder.' + name
return name
def a ( ):
"""simple docstring"""
UpperCamelCase : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase : str = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str]=False ):
"""simple docstring"""
UpperCamelCase : Any = get_config(__lowercase )
# load original model from timm
UpperCamelCase : List[str] = create_model(__lowercase , pretrained=__lowercase )
timm_model.eval()
# load state_dict of original model
UpperCamelCase : List[str] = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCamelCase : List[str] = state_dict.pop(__lowercase )
UpperCamelCase : Dict = val.squeeze() if 'head' in key else val
# load HuggingFace model
UpperCamelCase : Union[str, Any] = BitForImageClassification(__lowercase )
model.eval()
model.load_state_dict(__lowercase )
# create image processor
UpperCamelCase : List[str] = create_transform(**resolve_data_config({} , model=__lowercase ) )
UpperCamelCase : Dict = transform.transforms
UpperCamelCase : Tuple = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
UpperCamelCase : List[Any] = BitImageProcessor(
do_resize=__lowercase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowercase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=__lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCamelCase : Tuple = prepare_img()
UpperCamelCase : List[Any] = transform(__lowercase ).unsqueeze(0 )
UpperCamelCase : str = processor(__lowercase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(__lowercase , __lowercase )
# verify logits
with torch.no_grad():
UpperCamelCase : Optional[Any] = model(__lowercase )
UpperCamelCase : Optional[int] = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCamelCase : Optional[int] = timm_model(__lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowercase , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(F"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
if push_to_hub:
print(F"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(F"""ybelkada/{model_name}""" )
processor.push_to_hub(F"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__UpperCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
__UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
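# Hedged usage note (not part of the original file): a typical invocation,
# with the script filename assumed, would be roughly
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-dump --push_to_hub
# The flags mirror the argparse definitions above.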
| 704
|
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
UpperCamelCase : int = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
UpperCamelCase : List[str] = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:]
UpperCamelCase : Tuple = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
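# Worked example (sketch, not part of the original file): for a = 25 ("11001")
# and b = 32 ("100000"), zero-filling both to width 6 and OR-ing column by
# column gives "111001", so the function returns "0b111001" (decimal 57).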
if __name__ == "__main__":
import doctest
doctest.testmod()
| 643
| 0
|
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] = 1 / sqrt(2 ) ):
"""simple docstring"""
UpperCamelCase : Dict = tau * frequency / samplerate
UpperCamelCase : Optional[Any] = sin(__snake_case )
UpperCamelCase : Tuple = cos(__snake_case )
UpperCamelCase : Union[str, Any] = _sin / (2 * q_factor)
UpperCamelCase : Union[str, Any] = (1 - _cos) / 2
UpperCamelCase : Tuple = 1 - _cos
UpperCamelCase : List[str] = 1 + alpha
UpperCamelCase : List[str] = -2 * _cos
UpperCamelCase : Tuple = 1 - alpha
UpperCamelCase : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] = 1 / sqrt(2 ) ):
"""simple docstring"""
UpperCamelCase : Tuple = tau * frequency / samplerate
UpperCamelCase : Tuple = sin(__snake_case )
UpperCamelCase : int = cos(__snake_case )
UpperCamelCase : List[Any] = _sin / (2 * q_factor)
UpperCamelCase : Dict = (1 + _cos) / 2
UpperCamelCase : Optional[Any] = -1 - _cos
UpperCamelCase : Optional[Any] = 1 + alpha
UpperCamelCase : Optional[int] = -2 * _cos
UpperCamelCase : List[Any] = 1 - alpha
UpperCamelCase : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int = 1 / sqrt(2 ) ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = tau * frequency / samplerate
UpperCamelCase : Optional[int] = sin(__snake_case )
UpperCamelCase : Optional[int] = cos(__snake_case )
UpperCamelCase : Optional[Any] = _sin / (2 * q_factor)
UpperCamelCase : Dict = _sin / 2
UpperCamelCase : Optional[int] = 0
UpperCamelCase : Optional[int] = -ba
UpperCamelCase : List[Any] = 1 + alpha
UpperCamelCase : str = -2 * _cos
UpperCamelCase : int = 1 - alpha
UpperCamelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any = 1 / sqrt(2 ) ):
"""simple docstring"""
UpperCamelCase : Any = tau * frequency / samplerate
UpperCamelCase : int = sin(__snake_case )
UpperCamelCase : List[Any] = cos(__snake_case )
UpperCamelCase : Dict = _sin / (2 * q_factor)
UpperCamelCase : Any = 1 - alpha
UpperCamelCase : Union[str, Any] = -2 * _cos
UpperCamelCase : Any = 1 + alpha
UpperCamelCase : str = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int = 1 / sqrt(2 ) , ):
"""simple docstring"""
UpperCamelCase : int = tau * frequency / samplerate
UpperCamelCase : Tuple = sin(__snake_case )
UpperCamelCase : Any = cos(__snake_case )
UpperCamelCase : List[str] = _sin / (2 * q_factor)
UpperCamelCase : List[str] = 10 ** (gain_db / 40)
UpperCamelCase : Optional[int] = 1 + alpha * big_a
UpperCamelCase : List[str] = -2 * _cos
UpperCamelCase : List[Any] = 1 - alpha * big_a
UpperCamelCase : Union[str, Any] = 1 + alpha / big_a
UpperCamelCase : str = -2 * _cos
UpperCamelCase : Optional[Any] = 1 - alpha / big_a
UpperCamelCase : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt


def make_lowshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)

    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)

    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
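

# Minimal usage sketch (an illustration, not part of the original module). It
# assumes the IIRFilter class used above exposes a process(sample) method that
# runs one sample through the filter's difference equation; sin and tau are
# already imported at the top of this module.
if __name__ == "__main__":
    samplerate = 48_000
    filt = make_highpass(1_000, samplerate)

    # Feed a 100 Hz sine through the 1 kHz high-pass filter; once the filter
    # settles, the peak output amplitude is well below the input's 1.0.
    signal = [sin(tau * 100 * n / samplerate) for n in range(4_800)]
    filtered = [filt.process(sample) for sample in signal]
    print(max(abs(s) for s in filtered[2_400:]))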
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    """Generate a k_size x k_size Gaussian kernel centred on the window."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
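
# A quick sanity check (illustrative values, not part of the original file):
# with k_size=3 and sigma=1 the kernel above sums to roughly 0.78 rather than
# 1.0, since the formula uses 1/(2*pi*sigma) with no normalizing sum over the
# window, so filtering darkens the image slightly.
#
#     gen_gaussian_kernel(3, 1).shape            # (3, 3)
#     round(gen_gaussian_kernel(3, 1).sum(), 2)  # ~0.78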


def gaussian_filter(image, k_size, sigma):
    """Apply a Gaussian blur to a grayscale image via an im2col matrix product."""
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn each k_size*k_size window into a row and stack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # flatten the kernel into shape (k*k,)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # multiply, reshape, and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
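
# Note (illustrative, not in the original file): since no padding is used, the
# result is a "valid" convolution and the output shrinks by k_size - 1 pixels
# per axis; e.g. a 512x512 input filtered with k_size=3 yields 510x510.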


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # convert the image to grayscale
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # filter with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()