| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
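# Added note (assumption, not stated in the source): tests like these are usually
# run from the RAG examples directory, e.g.
#   python -m pytest -sv _test_finetune_rag.py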
| 325
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
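# Illustrative sketch (added; not part of the original script): the two
# compositions named above, applied to small example fuzzy relations.
# R1 = np.array([[0.2, 0.5], [0.8, 0.9]])
# R2 = np.array([[0.3, 0.6], [0.7, 0.4]])
# max_min = fuzz.maxmin_composition(R1, R2)    # T[i][k] = max_j min(R1[i][j], R2[j][k])
# max_prod = fuzz.maxprod_composition(R1, R2)  # T[i][k] = max_j R1[i][j] * R2[j][k]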
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 325
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
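# A minimal usage sketch (added for illustration; the image path is hypothetical):
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(image=Image.open("photo.png"), question="What is on the table?")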
| 325
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])
            )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Reverse (deterministic) DDIM step, so only DDIM schedulers are supported.
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        # Spherical linear interpolation between two flattened tensors.
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
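# A minimal usage sketch (added; the checkpoint name is an assumption based on
# publicly released audio-diffusion models):
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#   output = pipe(batch_size=1)
#   image, audio = output.images[0], output.audios[0]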
| 325
| 1
|
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
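# Added example (illustrative; the function name above reconstructs an obfuscated
# identifier): the cheapest right/down path through [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
# is 1 -> 3 -> 1 -> 1 -> 1, so min_path_sum(...) returns 7.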
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325
|
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]
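# Added note: with the default limit of 1000 this is Project Euler problem 39;
# solution() returns 840, the perimeter admitting the most integer right triangles.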
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
| 325
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
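# Illustrative usage (added; the checkpoint name is a real Chinese-CLIP release but
# should be treated as an assumption here):
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张小猫的照片"], images=image, return_tensors="pt")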
| 325
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 325
| 1
|
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 325
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
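# Illustrative usage (added; the checkpoint name is a real InstructBLIP release but
# should be treated as an assumption here):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")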
| 325
| 1
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 325
|
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
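# Added examples (illustrative): twin_prime(3) == 5 since 3 and 5 are both prime;
# twin_prime(4) == -1 since 4 is not prime.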
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mega"] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 325
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
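# Added note: this registers the `download` subcommand of the transformers CLI, e.g.
#   transformers-cli download bert-base-uncased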
| 325
| 1
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
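# Added note: this is Project Euler problem 26 (longest recurring cycle in the
# decimal expansion of 1/d); with the default digit=1000 the answer is d = 983.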
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 325
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
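# Illustrative usage (added): build a randomly initialized model from this config;
# importing XLMRobertaModel from transformers is assumed to be available here.
#   from transformers import XLMRobertaConfig, XLMRobertaModel
#   config = XLMRobertaConfig()
#   model = XLMRobertaModel(config)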
| 325
|
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    # p**q * q**p <= base**degree  <=>  q*log2(p) + p*log2(q) <= degree*log2(base)
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(f"{solution() = }")
| 325
| 1
|
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 325
|
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'If a class of 40 students must be arranged into groups of',
f"4 for group projects, there are {combinations(40, 4)} ways",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f"are {combinations(10, 3)} ways that first, second and",
'third place can be awarded.',
)
| 325
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
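# Illustrative usage (added): instantiate a model from the default configuration;
# importing TimesformerModel from transformers is assumed to be available here.
#   from transformers import TimesformerConfig, TimesformerModel
#   config = TimesformerConfig()
#   model = TimesformerModel(config)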
| 325
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCAmelCase__( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCamelCase = 1_2_8 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 2000.0 , __lowerCamelCase = 7_6_8 , __lowerCamelCase = 1_2 , __lowerCamelCase = 1_2 , __lowerCamelCase = 6_4 , __lowerCamelCase = 2_0_4_8 , __lowerCamelCase = 0.1 , ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.Sequential(
nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , )
_SCREAMING_SNAKE_CASE : str = nn.Embedding(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
for lyr_num in range(__lowerCamelCase ):
# FiLM conditional T5 decoder
_SCREAMING_SNAKE_CASE : Optional[int] = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
self.decoders.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_SCREAMING_SNAKE_CASE : Tuple = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_SCREAMING_SNAKE_CASE : str = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_SCREAMING_SNAKE_CASE : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_SCREAMING_SNAKE_CASE : Optional[int] = torch.broadcast_to(
torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.position_encoding(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.continuous_inputs_projection(__lowerCamelCase )
inputs += position_encodings
_SCREAMING_SNAKE_CASE : Any = self.dropout(__lowerCamelCase )
# decoder: No padding present.
_SCREAMING_SNAKE_CASE : Any = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_SCREAMING_SNAKE_CASE : List[str] = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_SCREAMING_SNAKE_CASE : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_SCREAMING_SNAKE_CASE : Optional[Any] = lyr(
__lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0]
_SCREAMING_SNAKE_CASE : int = self.decoder_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.post_dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.spec_out(__lowerCamelCase )
return spec_out
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> Dict:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.layer[0](
__lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , )
if encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE : str = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_SCREAMING_SNAKE_CASE : Tuple = self.layer[1](
__lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
_SCREAMING_SNAKE_CASE : Optional[Any] = self.layer[-1](__lowerCamelCase , __lowerCamelCase )
return (hidden_states,)
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
# pre_self_attention_layer_norm
_SCREAMING_SNAKE_CASE : int = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Any = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase )
# Self-attention block
_SCREAMING_SNAKE_CASE : Optional[int] = self.attention(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[Any] = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Tuple = self.layer_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.attention(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + self.dropout(__lowerCamelCase )
return layer_output
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Tuple = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.film(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = self.DenseReluDense(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = NewGELUActivation()
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
_SCREAMING_SNAKE_CASE : Dict = self.act(self.wi_a(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Dict = self.wi_a(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = hidden_gelu * hidden_linear
_SCREAMING_SNAKE_CASE : Optional[int] = self.dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = self.wo(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : str = eps
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[Any]:
# T5 uses a layer norm that only scales and doesn't shift, also known as Root Mean
# Square Layer Normalization (https://arxiv.org/abs/1910.07467); the variance is
# therefore calculated without the mean and there is no bias. Additionally, we make
# sure that the accumulation for half-precision inputs is done in fp32.
_SCREAMING_SNAKE_CASE : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_SCREAMING_SNAKE_CASE : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
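# A minimal sketch (not part of the original file): the RMS-style normalization
# above, written out by hand. Note there is no mean subtraction and no bias,
# only a learned per-feature scale.
import torch
_x = torch.tensor([[3.0, 4.0]])
_variance = _x.pow(2).mean(-1, keepdim=True) # (9 + 16) / 2 = 12.5
_normed = _x * torch.rsqrt(_variance + 1e-6) # ~[[0.8485, 1.1314]]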
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def UpperCamelCase_ ( self , __lowerCamelCase ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(__lowerCamelCase , 3.0 )) ))
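# A minimal sketch (not part of the original file): the tanh approximation above
# tracks the exact GELU closely; at x = 1.0 it gives ~0.8412 vs ~0.8413 exact.
import math
_x = 1.0
_gelu_tanh = 0.5 * _x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (_x + 0.044715 * _x**3)))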
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Any = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.scale_bias(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = torch.chunk(__lowerCamelCase , 2 , -1 )
_SCREAMING_SNAKE_CASE : Optional[int] = x * (1 + scale) + shift
return x
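# A minimal sketch (not part of the original file): FiLM conditioning as applied
# above. A conditioning embedding is projected to per-feature (scale, shift)
# pairs and applied as x * (1 + scale) + shift.
import torch
_x = torch.ones(1, 4)
_scale, _shift = torch.full((1, 4), 0.5), torch.full((1, 4), -1.0)
_film = _x * (1 + _scale) + _shift # every entry: 1 * 1.5 - 1 = 0.5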
| 325
| 1
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ("foo.json",)] )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : Dict = GenerationConfig(
do_sample=__a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__a , config_name=__a )
_SCREAMING_SNAKE_CASE : Optional[int] = GenerationConfig.from_pretrained(__a , config_name=__a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , __a )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained("gpt2" )
_SCREAMING_SNAKE_CASE : Optional[int] = GenerationConfig.from_model_config(__a )
_SCREAMING_SNAKE_CASE : str = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__a , __a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[int] = GenerationConfig()
_SCREAMING_SNAKE_CASE : Any = {
'max_new_tokens': 1_0_2_4,
'foo': 'bar',
}
_SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(__a )
_SCREAMING_SNAKE_CASE : Tuple = generation_config.update(**__a )
# update_kwargs was not modified (no side effects)
self.assertEqual(__a , __a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__a , {"foo": "bar"} )
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = GenerationConfig()
_SCREAMING_SNAKE_CASE : Tuple = 'bar'
with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir:
generation_config.save_pretrained(__a )
_SCREAMING_SNAKE_CASE : List[str] = GenerationConfig.from_pretrained(__a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar" )
_SCREAMING_SNAKE_CASE : List[str] = GenerationConfig.from_model_config(__a )
assert not hasattr(__a , "foo" ) # no new kwargs should be initialized if from config
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , __a )
self.assertEqual(default_config.num_beams , 1 )
_SCREAMING_SNAKE_CASE : List[Any] = GenerationConfig(
do_sample=__a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , __a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__a )
_SCREAMING_SNAKE_CASE : List[str] = GenerationConfig.from_pretrained(__a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , __a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : List[str] = TOKEN
HfFolder.save_token(__a )
@classmethod
def UpperCamelCase_ ( cls ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" )
except HTTPError:
pass
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = GenerationConfig(
do_sample=__a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : str = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__a , repo_id="test-generation-config" , push_to_hub=__a , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : List[str] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Optional[int] = GenerationConfig(
do_sample=__a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : Dict = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__a , repo_id="valid_org/test-generation-config-org" , push_to_hub=__a , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE : Dict = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
| 350
|
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = [0 for i in range(r + 1 )]
# nc0 = 1
_SCREAMING_SNAKE_CASE : Optional[int] = 1
for i in range(1, n + 1 ):
# to compute current row from previous row.
_SCREAMING_SNAKE_CASE : Union[str, Any] = min(__lowerCamelCase, __lowerCamelCase )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
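# A minimal sketch (not part of the original file): cross-checking the rolling
# Pascal-row computation above against the standard library.
import math
assert math.comb(10, 5) == 252 # same value as binomial_coefficient(n=10, r=5)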
| 325
| 0
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCAmelCase__( A__ , A__ , A__ ):
'''simple docstring'''
__snake_case = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = 5_0_2_5_7 , __lowerCamelCase = 1_0_2_4 , __lowerCamelCase = 7_6_8 , __lowerCamelCase = 1_2 , __lowerCamelCase = 1_2 , __lowerCamelCase = None , __lowerCamelCase = "gelu_new" , __lowerCamelCase = 0.1 , __lowerCamelCase = 0.1 , __lowerCamelCase = 0.1 , __lowerCamelCase = 1E-5 , __lowerCamelCase = 0.02 , __lowerCamelCase = True , __lowerCamelCase = True , __lowerCamelCase = False , __lowerCamelCase = False , ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : List[str] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim
_SCREAMING_SNAKE_CASE : str = prefix_hidden_dim
_SCREAMING_SNAKE_CASE : str = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_SCREAMING_SNAKE_CASE : List[Any] = (
nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_SCREAMING_SNAKE_CASE : Any = GPTaConfig(
vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , )
_SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel(__A )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None , ) -> List[str]:
_SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(__A )
_SCREAMING_SNAKE_CASE : Any = self.encode_prefix(__A )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.decode_prefix(__A )
_SCREAMING_SNAKE_CASE : Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_SCREAMING_SNAKE_CASE : int = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
_SCREAMING_SNAKE_CASE : Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> torch.Tensor:
return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]:
return self.encode_prefix(__A )
@torch.no_grad()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Tuple = torch.split(__A , 1 , dim=0 )
_SCREAMING_SNAKE_CASE : Optional[int] = []
_SCREAMING_SNAKE_CASE : List[str] = []
for feature in features:
_SCREAMING_SNAKE_CASE : Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature
# Only support beam search for now
_SCREAMING_SNAKE_CASE : Optional[Any] = self.generate_beam(
input_embeds=__A , device=__A , eos_token_id=__A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_SCREAMING_SNAKE_CASE : Tuple = torch.stack(__A )
_SCREAMING_SNAKE_CASE : int = torch.stack(__A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def UpperCamelCase_ ( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase = 5 , __lowerCamelCase = 6_7 , __lowerCamelCase = 1.0 , __lowerCamelCase = None , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : Any = None
_SCREAMING_SNAKE_CASE : int = torch.ones(__A , device=__A , dtype=torch.int )
_SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool )
if input_embeds is not None:
_SCREAMING_SNAKE_CASE : List[str] = input_embeds
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.transformer.transformer.wte(__A )
for i in range(__A ):
_SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=__A )
_SCREAMING_SNAKE_CASE : str = outputs.logits
_SCREAMING_SNAKE_CASE : str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_SCREAMING_SNAKE_CASE : Dict = logits.softmax(-1 ).log()
if scores is None:
_SCREAMING_SNAKE_CASE : Any = logits.topk(__A , -1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = generated.expand(__A , *generated.shape[1:] )
_SCREAMING_SNAKE_CASE : List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_SCREAMING_SNAKE_CASE : List[str] = next_tokens
else:
_SCREAMING_SNAKE_CASE : List[Any] = tokens.expand(__A , *tokens.shape[1:] )
_SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) , dim=1 )
else:
_SCREAMING_SNAKE_CASE : List[Any] = -float(np.inf )
_SCREAMING_SNAKE_CASE : int = 0
_SCREAMING_SNAKE_CASE : Optional[int] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_SCREAMING_SNAKE_CASE : List[Any] = scores_sum / seq_lengths[:, None]
_SCREAMING_SNAKE_CASE : Tuple = scores_sum_average.view(-1 ).topk(__A , -1 )
_SCREAMING_SNAKE_CASE : Optional[Any] = next_tokens // scores_sum.shape[1]
_SCREAMING_SNAKE_CASE : Dict = seq_lengths[next_tokens_source]
_SCREAMING_SNAKE_CASE : Tuple = next_tokens % scores_sum.shape[1]
_SCREAMING_SNAKE_CASE : Optional[Any] = next_tokens.unsqueeze(1 )
_SCREAMING_SNAKE_CASE : str = tokens[next_tokens_source]
_SCREAMING_SNAKE_CASE : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
_SCREAMING_SNAKE_CASE : Dict = generated[next_tokens_source]
_SCREAMING_SNAKE_CASE : Dict = scores_sum_average * seq_lengths
_SCREAMING_SNAKE_CASE : Tuple = is_stopped[next_tokens_source]
_SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_SCREAMING_SNAKE_CASE : List[Any] = torch.cat((generated, next_token_embed) , dim=1 )
_SCREAMING_SNAKE_CASE : Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze()
if is_stopped.all():
break
_SCREAMING_SNAKE_CASE : str = scores / seq_lengths
_SCREAMING_SNAKE_CASE : Optional[int] = scores.argsort(descending=__A )
# tokens tensors are already padded to max_seq_length
_SCREAMING_SNAKE_CASE : Optional[Any] = [tokens[i] for i in order]
_SCREAMING_SNAKE_CASE : Dict = torch.stack(__A , dim=0 )
_SCREAMING_SNAKE_CASE : Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
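# A minimal sketch (not part of the original file): the ranking above scores
# beams by length-normalized log-probability, sum(log p) / length, so longer
# beams are not penalized merely for accumulating more log terms.
import torch
_scores_sum = torch.tensor([-2.0, -3.0]) # total log-prob of two candidate beams
_lengths = torch.tensor([2.0, 4.0]) # tokens generated so far
_avg = _scores_sum / _lengths # tensor([-1.0000, -0.7500]) -> the second beam ranks higher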
| 351
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCamelCase__ =logging.getLogger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ) -> Optional[Any]:
super().__init__(
__lowerCamelCase , question_encoder_tokenizer=__lowerCamelCase , generator_tokenizer=__lowerCamelCase , index=__lowerCamelCase , init_retrieval=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval: the default
# nccl backend doesn't support gather/scatter operations, and gloo
# is too slow to replace nccl for the core GPU communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
_SCREAMING_SNAKE_CASE : List[str] = self._infer_socket_ifname()
# avoid clash with the NCCL port
_SCREAMING_SNAKE_CASE : List[Any] = str(distributed_port + 1 )
_SCREAMING_SNAKE_CASE : int = dist.new_group(ranks=__lowerCamelCase , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
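# A minimal sketch (not part of the original file) of the side-channel group
# created above, assuming torch.distributed is already initialized:
#   group = dist.new_group(ranks=list(range(dist.get_world_size())), backend="gloo")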
def UpperCamelCase_ ( self ) -> Optional[Any]:
return dist.get_rank(group=self.process_group ) == 0
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=torch.floataa ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = torch.empty(__lowerCamelCase , dtype=__lowerCamelCase )
dist.scatter(__lowerCamelCase , src=0 , scatter_list=__lowerCamelCase , group=self.process_group )
return target_tensor
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_SCREAMING_SNAKE_CASE : Any = next((addr for addr in addrs if addr.startswith("e" )) , __lowerCamelCase )
return ifname
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self._main_retrieve(__lowerCamelCase , __lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCamelCase )
# distributed training
_SCREAMING_SNAKE_CASE : Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
_SCREAMING_SNAKE_CASE : Any = None
if self._is_main():
_SCREAMING_SNAKE_CASE : Optional[Any] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowerCamelCase )]
dist.gather(torch.tensor(__lowerCamelCase ) , dst=0 , gather_list=__lowerCamelCase , group=self.process_group )
# scatter logic
_SCREAMING_SNAKE_CASE : Optional[int] = question_hidden_states.shape[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : Optional[int] = []
if self._is_main():
assert len(__lowerCamelCase ) == world_size
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self._main_retrieve(torch.cat(__lowerCamelCase ).numpy() , __lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self._scattered(__lowerCamelCase , [n_queries, n_docs] , target_type=torch.intaa )
_SCREAMING_SNAKE_CASE : Optional[Any] = self._scattered(__lowerCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCamelCase )
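# A minimal sketch (not part of the original file) of the flow above:
#   1. every worker sends its question embeddings to rank 0 (dist.gather)
#   2. rank 0 queries the index once for the concatenated batch
#   3. rank 0 chunks the doc ids/embeddings per worker and scatters them back (dist.scatter)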
| 325
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=7 , __lowerCamelCase=3 , __lowerCamelCase=1_8 , __lowerCamelCase=3_0 , __lowerCamelCase=4_0_0 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=[0.4814_5466, 0.457_8275, 0.4082_1073] , __lowerCamelCase=[0.2686_2954, 0.2613_0258, 0.2757_7711] , __lowerCamelCase=True , ) -> str:
_SCREAMING_SNAKE_CASE : int = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_SCREAMING_SNAKE_CASE : str = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
_SCREAMING_SNAKE_CASE : Optional[Any] = parent
_SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
_SCREAMING_SNAKE_CASE : str = num_channels
_SCREAMING_SNAKE_CASE : Dict = image_size
_SCREAMING_SNAKE_CASE : List[str] = min_resolution
_SCREAMING_SNAKE_CASE : int = max_resolution
_SCREAMING_SNAKE_CASE : str = do_resize
_SCREAMING_SNAKE_CASE : Dict = size
_SCREAMING_SNAKE_CASE : List[Any] = do_center_crop
_SCREAMING_SNAKE_CASE : Optional[int] = crop_size
_SCREAMING_SNAKE_CASE : int = do_normalize
_SCREAMING_SNAKE_CASE : Dict = image_mean
_SCREAMING_SNAKE_CASE : Optional[int] = image_std
_SCREAMING_SNAKE_CASE : Optional[int] = do_convert_rgb
def UpperCamelCase_ ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCamelCase_ ( self , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=False ) -> List[Any]:
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_SCREAMING_SNAKE_CASE : List[str] = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_SCREAMING_SNAKE_CASE : Optional[int] = []
for i in range(self.batch_size ):
_SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as the last dimension
_SCREAMING_SNAKE_CASE : List[str] = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
if torchify:
_SCREAMING_SNAKE_CASE : Dict = [torch.from_numpy(_UpperCAmelCase ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class lowerCAmelCase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=_UpperCAmelCase )
@property
def UpperCamelCase_ ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_center_crop" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "center_crop" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 2_2_4, "width": 2_2_4} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def UpperCamelCase_ ( self ) -> List[str]:
pass
def UpperCamelCase_ ( self ) -> int:
# Initialize image_processing
_SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.prepare_inputs(equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_SCREAMING_SNAKE_CASE : Any = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase_ ( self ) -> List[Any]:
# Initialize image_processing
_SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_SCREAMING_SNAKE_CASE : Tuple = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase_ ( self ) -> List[str]:
# Initialize image_processing
_SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE : str = self.image_processor_tester.prepare_inputs(equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_SCREAMING_SNAKE_CASE : List[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class lowerCAmelCase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : str = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_UpperCAmelCase )
_SCREAMING_SNAKE_CASE : int = 3
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_center_crop" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "center_crop" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
pass
def UpperCamelCase_ ( self ) -> int:
# Initialize image_processing
_SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_SCREAMING_SNAKE_CASE : Any = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 352
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'timesformer'
def __init__( self , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=8 , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.02 , __lowerCamelCase=1E-6 , __lowerCamelCase=True , __lowerCamelCase="divided_space_time" , __lowerCamelCase=0 , **__lowerCamelCase , ) -> List[str]:
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = image_size
_SCREAMING_SNAKE_CASE : str = patch_size
_SCREAMING_SNAKE_CASE : str = num_channels
_SCREAMING_SNAKE_CASE : str = num_frames
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : Any = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
_SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
_SCREAMING_SNAKE_CASE : List[str] = qkv_bias
_SCREAMING_SNAKE_CASE : Tuple = attention_type
_SCREAMING_SNAKE_CASE : Union[str, Any] = drop_path_rate
| 325
| 0
|
from __future__ import annotations
from cmath import sqrt
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
_SCREAMING_SNAKE_CASE : Tuple = b * b - 4 * a * c
_SCREAMING_SNAKE_CASE : Tuple = (-b + sqrt(__lowerCAmelCase )) / (2 * a)
_SCREAMING_SNAKE_CASE : Any = (-b - sqrt(__lowerCAmelCase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = quadratic_roots(a=5, b=6, c=1 )
print(f"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main()
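# A minimal sketch (not part of the original file): x^2 - 3x + 2 factors as
# (x - 1)(x - 2), so the helper above returns the real roots 2.0 and 1.0.
assert quadratic_roots(a=1, b=-3, c=2) == (2.0, 1.0)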
| 353
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={'vocab_file': 'spiece.model'}
UpperCamelCase__ ={
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
UpperCamelCase__ ={
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCamelCase__ ='▁'
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase="[CLS]" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<unk>" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<pad>" , __lowerCamelCase="[CLS]" , __lowerCamelCase="[MASK]" , __lowerCamelCase = None , **__lowerCamelCase , ) -> None:
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text, so there should be a match in a non-normalized sentence.
_SCREAMING_SNAKE_CASE : List[Any] = (
AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase , normalized=__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase )
else mask_token
)
_SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Dict = do_lower_case
_SCREAMING_SNAKE_CASE : List[Any] = remove_space
_SCREAMING_SNAKE_CASE : str = keep_accents
_SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
_SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return len(self.sp_model )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.__dict__.copy()
_SCREAMING_SNAKE_CASE : Optional[Any] = None
return state
def __setstate__( self , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_SCREAMING_SNAKE_CASE : Optional[int] = {}
_SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]:
if self.remove_space:
_SCREAMING_SNAKE_CASE : List[str] = " ".join(inputs.strip().split() )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = inputs
_SCREAMING_SNAKE_CASE : str = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
_SCREAMING_SNAKE_CASE : str = unicodedata.normalize("NFKD" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] )
if self.do_lower_case:
_SCREAMING_SNAKE_CASE : Dict = outputs.lower()
return outputs
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = self.preprocess_text(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = []
for piece in pieces:
if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_SCREAMING_SNAKE_CASE : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_SCREAMING_SNAKE_CASE : Union[str, Any] = cur_pieces[1:]
else:
_SCREAMING_SNAKE_CASE : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCamelCase )
else:
new_pieces.append(__lowerCamelCase )
return new_pieces
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[Any]:
return self.sp_model.PieceToId(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> str:
return self.sp_model.IdToPiece(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : List[str] = ""
_SCREAMING_SNAKE_CASE : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase ) + token
_SCREAMING_SNAKE_CASE : str = True
_SCREAMING_SNAKE_CASE : Optional[Any] = []
else:
current_sub_tokens.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = False
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1]
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
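# A minimal sketch (not part of the original file): for an ALBERT sequence pair
# the layout built above is `[CLS] A [SEP] B [SEP]`, with segment id 0 covering
# [CLS], sentence A and its [SEP], and segment id 1 covering sentence B and its
# trailing [SEP]:
#   tokens:   [CLS] a1 a2 [SEP] b1 [SEP]
#   type ids:   0   0  0    0    1    1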
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
_SCREAMING_SNAKE_CASE : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 325
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCamelCase__ =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCamelCase__ =' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
_SCREAMING_SNAKE_CASE : Dict = self.transformer_dir
shutil.copy(
os.path.join(_lowerCamelCase , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[Any] = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ) -> str:
_SCREAMING_SNAKE_CASE : int = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_SCREAMING_SNAKE_CASE : Optional[int] = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_SCREAMING_SNAKE_CASE : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
_SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(_lowerCamelCase , mode=_lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.transformer_dir , "new_code.py" )
with open(_lowerCamelCase , "w" , newline="\n" ) as f:
f.write(_lowerCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_lowerCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_lowerCamelCase )
with open(_lowerCamelCase , "r" ) as f:
self.assertTrue(f.read() , _lowerCamelCase )
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : List[str] = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , _lowerCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , _lowerCamelCase ) , )
# Copy consistency with a really long name
_SCREAMING_SNAKE_CASE : List[Any] = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , F"""{long_class_name}LMPredictionHead""" , re.sub("Bert" , _lowerCamelCase , _lowerCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , _lowerCamelCase , overwrite_result=re.sub("Bert" , "TestModel" , _lowerCamelCase ) , )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : List[Any] = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
_SCREAMING_SNAKE_CASE : List[str] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
_SCREAMING_SNAKE_CASE : int = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
_SCREAMING_SNAKE_CASE : Optional[Any] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
_SCREAMING_SNAKE_CASE : Optional[Any] = check_copies.convert_to_localized_md(
_lowerCamelCase , _lowerCamelCase , localized_readme["format_model_list"] )
self.assertFalse(_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = check_copies.convert_to_localized_md(
_lowerCamelCase , _lowerCamelCase , localized_readme["format_model_list"] )
# Check that the number of models matches README.md after conversion.
self.assertTrue(_lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
_SCREAMING_SNAKE_CASE : Optional[int] = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
_SCREAMING_SNAKE_CASE : str = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
_SCREAMING_SNAKE_CASE : Dict = check_copies.convert_to_localized_md(
_lowerCamelCase , _lowerCamelCase , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
| 354
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase__ =logging.get_logger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase ) -> None:
warnings.warn(
"The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use SegformerImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 325
| 0
|
from manim import *
# NOTE: variable names were reconstructed from the obfuscated original; directions
# and highlight colors (UP/DOWN/RIGHT, ORANGE) are assumed Manim conventions.
class MemoryOffloadAnimation(Scene):
    def construct(self):
        # Building blocks: a full-size memory cell, a fill cell, and a smaller "meta" cell for disk.
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        # CPU: two columns of six memory cells.
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU: a single column of four cells.
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        # The model: six layers laid out horizontally.
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        # One filled rectangle per layer, plus its shadow copy living on the CPU.
        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr)

        # Disk: two columns of six smaller cells.
        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Now watch as an input is passed through the model\nand how the memory is utilized and handled.", font_size=24
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.", font_size=24
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]), MoveToTarget(model_cpu_arr[i + 1])
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7), MoveToTarget(model_cpu_arr[i + 1], run_time=0.7)
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

            a = a_c
            a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_2), FadeOut(a, run_time=0.5)
        )

        step_3 = MarkupText("Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
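# Rendering sketch (assumes the Manim community edition CLI is installed; the file
# name below is hypothetical, use whatever this module is saved as):
#   manim -pql this_file.py MemoryOffloadAnimation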
| 355
|
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The Mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
| 325
| 0
|
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Generate all combinations of k numbers out of 1 ... n via backtracking."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
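# Equivalence sketch against the standard library: the backtracking above emits
# combinations in the same lexicographic order as itertools.
from itertools import combinations

assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]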
| 356
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the maximizing player in a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
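# Worked micro-example (a sketch using the function above): with leaves [3, 5, 2, 9]
# and height 2, the minimizer yields min(3, 5) = 3 and min(2, 9) = 2 at depth 1,
# and the maximizer picks max(3, 2) = 3 at the root.
assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3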
| 325
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 357
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Run `code` through `black` and return the formatted result, restyling docstrings along the way."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies.
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line.
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
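# Example of the marker this script enforces (a sketch; the dotted path and the
# Foo->Bar rename below are hypothetical, not real diffusers objects):
#
#   # Copied from diffusers.some_module.SomeClass.some_method with Foo->Bar
#
# When the copied body under such a comment drifts from the original definition,
# `check_copies()` raises, and `check_copies(overwrite=True)` rewrites it in place.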
| 325
| 0
|
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count how many ways each total can be rolled with `dice_number` dice of `sides_number` sides."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice), rounded to 7 digits."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9

    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(f"{solution() = }")
| 358
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
if hasattr(__lowerCamelCase , "use_cache" ):
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(model(__lowerCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = os.path.join(__lowerCamelCase , "saved_model" , "1" )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.models.load_model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : List[Any] = outputs["encoder_hidden_states"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs["encoder_attentions"]
else:
_SCREAMING_SNAKE_CASE : List[str] = outputs["hidden_states"]
_SCREAMING_SNAKE_CASE : Dict = outputs["attentions"]
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , "key_length" , __lowerCamelCase )
def check_decoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(__lowerCamelCase )
self.assertEqual(out_len % 2 , 0 )
_SCREAMING_SNAKE_CASE : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Any = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : Tuple = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Optional[int] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
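# Minimal usage sketch for the checkpoint exercised above (assumes network access
# and the transformers/tensorflow stack; the hidden size of 768 is confirmed by the
# expected shape in the integration test):
# from transformers import AutoTokenizer, TFConvBertModel
# tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
# model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
# outputs = model(tokenizer("Hello world", return_tensors="tf"))
# outputs.last_hidden_state.shape  # (1, sequence_length, 768)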
| 325
| 0
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
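# Loading sketch (an assumption-laden example, not the canonical wiring: the
# checkpoint name and the choice of image processor are what Stable Diffusion
# pipelines commonly use, but verify against your pipeline's config):
# from transformers import CLIPImageProcessor
# safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
# feature_extractor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
# clip_input = feature_extractor(images=[pil_image], return_tensors="pt").pixel_values
# checked_images, has_nsfw = safety_checker(clip_input, images=[pil_image])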
| 359
|
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the parity of each right-shifted value."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark the two set-bit counters on a few sample inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")

        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")

        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
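# Cross-check sketch against the standard library (uses the two counters above):
for sample in (0, 1, 25, 37, 58, 2**20 - 1):
    expected = bin(sample).count("1")
    assert get_set_bits_count_using_brian_kernighans_algorithm(sample) == expected
    assert get_set_bits_count_using_modulo_operator(sample) == expected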
| 325
| 0
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
# Context managers used to exercise `ContextManagers` below.
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels should also work on subclasses.
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels.
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
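# A minimal illustration of `ContextManagers` outside the test harness (a sketch;
# it simply enters the managers in order and exits them in reverse):
# with ContextManagers([context_fr(), context_en()]):
#     ...  # prints Bonjour!/Welcome! on entry and Bye!/Au revoir! on exit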
| 360
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 325
| 0
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}

    result = {k: replace(k, v) for k, v in initd.items()}

    assert _unmatched not in result.values(), "Incomplete partition spec."

    return freeze(unflatten_dict(result))
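# A tiny resolution sketch (assumes flax/jax are installed; the parameter tree is a
# made-up GPT-2-style fragment, not a real checkpoint):
# params = {"transformer": {"wte": {"embedding": 0}, "ln_f": {"scale": 0, "bias": 0}}}
# specs = set_partitions(params)
# # specs["transformer"]["wte"]["embedding"] == PartitionSpec("mp", None)
# # specs["transformer"]["ln_f"]["scale"] is None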
| 361
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 325
| 0
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCamelCase__ =logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
def __call__( self , __lowerCamelCase , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 5.0 , __lowerCamelCase = 1.0 , __lowerCamelCase = 1 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , __lowerCamelCase = None , __lowerCamelCase = 1 , ) -> Optional[Any]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : str = 1
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[Any] = len(__lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__lowerCamelCase )}""" )
_SCREAMING_SNAKE_CASE : str = batch_size * num_images_per_prompt
_SCREAMING_SNAKE_CASE : List[Any] = guidance_scale > 1.0
_SCREAMING_SNAKE_CASE : List[str] = self._encode_prompt(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCamelCase , __lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(__lowerCamelCase )}.""" )
# get the initial completely masked latents unless the user supplied it
_SCREAMING_SNAKE_CASE : Tuple = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
_SCREAMING_SNAKE_CASE : Tuple = self.transformer.num_vector_embeds - 1
_SCREAMING_SNAKE_CASE : List[Any] = torch.full(__lowerCamelCase , __lowerCamelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
F""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
_SCREAMING_SNAKE_CASE : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device )
_SCREAMING_SNAKE_CASE : int = self.scheduler.timesteps.to(self.device )
_SCREAMING_SNAKE_CASE : Tuple = latents
for i, t in enumerate(self.progress_bar(__lowerCamelCase ) ):
# expand the sample if we are doing classifier free guidance
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_SCREAMING_SNAKE_CASE : str = self.transformer(__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , timestep=__lowerCamelCase ).sample
if do_classifier_free_guidance:
_SCREAMING_SNAKE_CASE : str = model_output.chunk(2 )
_SCREAMING_SNAKE_CASE : List[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
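# renormalize so the guided scores remain a valid log-probability distribution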
model_output -= torch.logsumexp(__lowerCamelCase , dim=1 , keepdim=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.truncate(__lowerCamelCase , __lowerCamelCase )
# remove `log(0)`'s (`-inf`s)
_SCREAMING_SNAKE_CASE : Optional[int] = model_output.clamp(-7_0 )
# compute the previous noisy sample x_t -> x_t-1
_SCREAMING_SNAKE_CASE : Any = self.scheduler.step(__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = self.vqvae.config.vq_embed_dim
_SCREAMING_SNAKE_CASE : Dict = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_SCREAMING_SNAKE_CASE : Optional[int] = self.vqvae.quantize.get_codebook_entry(__lowerCamelCase , shape=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.vqvae.decode(__lowerCamelCase , force_not_quantize=__lowerCamelCase ).sample
_SCREAMING_SNAKE_CASE : Any = (image / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = torch.sort(__lowerCamelCase , 1 , descending=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = torch.exp(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_SCREAMING_SNAKE_CASE : List[str] = torch.full_like(keep_mask[:, 0:1, :] , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = torch.cat((all_true, keep_mask) , dim=1 )
_SCREAMING_SNAKE_CASE : List[Any] = keep_mask[:, :-1, :]
_SCREAMING_SNAKE_CASE : str = keep_mask.gather(1 , indices.argsort(1 ) )
_SCREAMING_SNAKE_CASE : List[str] = log_p_x_0.clone()
_SCREAMING_SNAKE_CASE : List[str] = -torch.inf # -inf = log(0)
return rv
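# A minimal, standalone sketch of the truncation-rate filtering implemented by
# the method above: every class outside the smallest set whose cumulative
# probability reaches `truncation_rate` is zeroed out (set to log(0) = -inf)
# before sampling. Shapes and names below are illustrative, not taken from the
# snippet.
import torch
def _truncate_log_probs_sketch(log_p_x_0, truncation_rate):
    sorted_log_p, indices = torch.sort(log_p_x_0, 1, descending=True)
    keep_mask = torch.exp(sorted_log_p).cumsum(dim=1) < truncation_rate
    # always keep the single most likely class
    all_true = torch.full_like(keep_mask[:, 0:1, :], True)
    keep_mask = torch.cat((all_true, keep_mask), dim=1)[:, :-1, :]
    # undo the sort so the mask lines up with the original class order
    keep_mask = keep_mask.gather(1, indices.argsort(1))
    out = log_p_x_0.clone()
    out[~keep_mask] = -torch.inf
    return out
# e.g. _truncate_log_probs_sketch(torch.log_softmax(torch.randn(2, 16, 8), dim=1), 0.9)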
| 362
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['vqvae']
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]:
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase , mel=__lowerCamelCase , vqvae=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
return 5_0 if isinstance(self.scheduler , __lowerCamelCase ) else 1_0_0_0
@torch.no_grad()
def __call__( self , __lowerCamelCase = 1 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
_SCREAMING_SNAKE_CASE : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_SCREAMING_SNAKE_CASE : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowerCamelCase , device=self.device , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = noise
_SCREAMING_SNAKE_CASE : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.mel.audio_slice_to_image(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
_SCREAMING_SNAKE_CASE : Optional[int] = (input_image / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample(
generator=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] )
_SCREAMING_SNAKE_CASE : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_SCREAMING_SNAKE_CASE : Optional[Any] = int(mask_start_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[int] = int(mask_end_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"]
else:
_SCREAMING_SNAKE_CASE : str = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
if isinstance(self.scheduler , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
else:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
_SCREAMING_SNAKE_CASE : str = mask[:, step, :, :mask_start]
if mask_end > 0:
_SCREAMING_SNAKE_CASE : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
_SCREAMING_SNAKE_CASE : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images
_SCREAMING_SNAKE_CASE : Dict = self.vqvae.decode(__lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_SCREAMING_SNAKE_CASE : List[str] = (images * 2_5_5).round().astype("uint8" )
_SCREAMING_SNAKE_CASE : Tuple = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ , mode="RGB" ).convert("L" ) for _ in images) )
_SCREAMING_SNAKE_CASE : Tuple = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) )
@torch.no_grad()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 5_0 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowerCamelCase )
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (sample / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : Any = torch.Tensor(__lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_SCREAMING_SNAKE_CASE : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.alphas_cumprod[t]
_SCREAMING_SNAKE_CASE : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t
_SCREAMING_SNAKE_CASE : Optional[int] = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_SCREAMING_SNAKE_CASE : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_SCREAMING_SNAKE_CASE : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCamelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> torch.Tensor:
_SCREAMING_SNAKE_CASE : Any = acos(torch.dot(torch.flatten(__lowerCamelCase ) , torch.flatten(__lowerCamelCase ) ) / torch.norm(__lowerCamelCase ) / torch.norm(__lowerCamelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xa / sin(theta )
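# Hypothetical usage of the static slerp helper above (the class and method
# names in this snippet are obfuscated, so the call is shown schematically):
# spherically interpolating halfway between two Gaussian noise tensors keeps
# the result approximately unit-variance, unlike a straight linear mix.
# start_noise, end_noise = torch.randn(1, 1, 256, 256), torch.randn(1, 1, 256, 256)
# midpoint = pipeline.slerp(start_noise, end_noise, 0.5)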
| 325
| 0
|
from math import sqrt
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
_SCREAMING_SNAKE_CASE : Optional[Any] = True
# 0 and 1 are not primes.
if number <= 1:
_SCREAMING_SNAKE_CASE : Dict = False
for divisor in range(2, int(round(sqrt(_lowerCamelCase ) ) ) + 1 ):
# if 'number' is divisible by 'divisor' then set 'status'
# to False and break out of the loop.
if number % divisor == 0:
_SCREAMING_SNAKE_CASE : Any = False
break
# precondition
assert isinstance(_lowerCamelCase, _lowerCamelCase ), "'status' must been from type bool"
return status
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_SCREAMING_SNAKE_CASE : int = list(range(2, n + 1 ) )
_SCREAMING_SNAKE_CASE : int = [] # this list will be returned.
# actual sieve of Eratosthenes
for i in range(len(_lowerCamelCase ) ):
for j in range(i + 1, len(_lowerCamelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_SCREAMING_SNAKE_CASE : str = 0
# filters actual prime numbers.
_SCREAMING_SNAKE_CASE : Tuple = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_lowerCamelCase, _lowerCamelCase ), "'ans' must been from type list"
return ans
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and (n > 2), "'N' must been an int and > 2"
_SCREAMING_SNAKE_CASE : Dict = []
# iterates over all numbers from 2 up to N (inclusive);
# if a number is prime then appends it to the list 'ans'
for number in range(2, n + 1 ):
if is_prime(_lowerCamelCase ):
ans.append(_lowerCamelCase )
# precondition
assert isinstance(_lowerCamelCase, _lowerCamelCase ), "'ans' must been from type list"
return ans
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and number >= 0, "'number' must been an int and >= 0"
_SCREAMING_SNAKE_CASE : int = [] # this list will be returned by the function.
# potential prime number factors.
_SCREAMING_SNAKE_CASE : List[str] = 2
_SCREAMING_SNAKE_CASE : List[Any] = number
if number == 0 or number == 1:
ans.append(_lowerCamelCase )
# if 'number' is not prime then build the prime factorization of 'number'
elif not is_prime(_lowerCamelCase ):
while quotient != 1:
if is_prime(_lowerCamelCase ) and (quotient % factor == 0):
ans.append(_lowerCamelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(_lowerCamelCase )
# precondition
assert isinstance(_lowerCamelCase, _lowerCamelCase ), "'ans' must been from type list"
return ans
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
_SCREAMING_SNAKE_CASE : List[str] = 0
# prime factorization of 'number'
_SCREAMING_SNAKE_CASE : Any = prime_factorization(_lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = max(_lowerCamelCase )
# precondition
assert isinstance(_lowerCamelCase, _lowerCamelCase ), "'ans' must been from type int"
return ans
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
_SCREAMING_SNAKE_CASE : List[Any] = 0
# prime factorization of 'number'
_SCREAMING_SNAKE_CASE : int = prime_factorization(_lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = min(_lowerCamelCase )
# precondition
assert isinstance(_lowerCamelCase, _lowerCamelCase ), "'ans' must been from type int"
return ans
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(_lowerCamelCase, _lowerCamelCase ), "'number' must been an int"
assert isinstance(number % 2 == 0, _lowerCamelCase ), "compare bust been from type bool"
return number % 2 == 0
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(_lowerCamelCase, _lowerCamelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0, _lowerCamelCase ), "compare bust been from type bool"
return number % 2 != 0
def lowerCamelCase__ (__lowerCamelCase ):
assert (
isinstance(_lowerCamelCase, _lowerCamelCase ) and (number > 2) and is_even(_lowerCamelCase )
), "'number' must been an int, even and > 2"
_SCREAMING_SNAKE_CASE : List[Any] = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
_SCREAMING_SNAKE_CASE : Tuple = get_prime_numbers(_lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = len(_lowerCamelCase )
# run variable for while-loops.
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
_SCREAMING_SNAKE_CASE : Dict = None
# exit variable, used to break out of the loops
_SCREAMING_SNAKE_CASE : int = True
while i < len_pn and loop:
_SCREAMING_SNAKE_CASE : Optional[Any] = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_SCREAMING_SNAKE_CASE : Any = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_lowerCamelCase, _lowerCamelCase )
and (len(_lowerCamelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
assert (
isinstance(_lowerCamelCase, _lowerCamelCase )
and isinstance(_lowerCamelCase, _lowerCamelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
_SCREAMING_SNAKE_CASE : Optional[Any] = 0
while numbera != 0:
_SCREAMING_SNAKE_CASE : Union[str, Any] = numbera % numbera
_SCREAMING_SNAKE_CASE : Any = numbera
_SCREAMING_SNAKE_CASE : Dict = rest
# precondition
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
assert (
isinstance(_lowerCamelCase, _lowerCamelCase )
and isinstance(_lowerCamelCase, _lowerCamelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
_SCREAMING_SNAKE_CASE : int = 1 # actual answer that will be returned.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
_SCREAMING_SNAKE_CASE : int = prime_factorization(_lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = prime_factorization(_lowerCamelCase )
elif numbera == 1 or numbera == 1:
_SCREAMING_SNAKE_CASE : List[str] = []
_SCREAMING_SNAKE_CASE : List[str] = []
_SCREAMING_SNAKE_CASE : Dict = max(_lowerCamelCase, _lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = 0
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
_SCREAMING_SNAKE_CASE : Dict = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
_SCREAMING_SNAKE_CASE : Optional[Any] = prime_fac_a.count(_lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = prime_fac_a.count(_lowerCamelCase )
for _ in range(max(_lowerCamelCase, _lowerCamelCase ) ):
ans *= n
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = prime_fac_a.count(_lowerCamelCase )
for _ in range(_lowerCamelCase ):
ans *= n
done.append(_lowerCamelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
_SCREAMING_SNAKE_CASE : List[str] = prime_fac_a.count(_lowerCamelCase )
for _ in range(_lowerCamelCase ):
ans *= n
done.append(_lowerCamelCase )
# precondition
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and (n >= 0), "'number' must been a positive int"
_SCREAMING_SNAKE_CASE : Dict = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans is not prime then
# advance to the next prime number.
while not is_prime(_lowerCamelCase ):
ans += 1
# precondition
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and is_prime(
_lowerCamelCase ), "'ans' must been a prime number and from type int"
return ans
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
assert (
is_prime(_lowerCamelCase ) and is_prime(_lowerCamelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
_SCREAMING_SNAKE_CASE : List[str] = p_number_a + 1 # jump to the next number
_SCREAMING_SNAKE_CASE : Optional[int] = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_lowerCamelCase ):
number += 1
while number < p_number_a:
ans.append(_lowerCamelCase )
number += 1
# fetch the next prime number.
while not is_prime(_lowerCamelCase ):
number += 1
# precondition
assert (
isinstance(_lowerCamelCase, _lowerCamelCase )
and ans[0] != p_number_a
and ans[len(_lowerCamelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and (n >= 1), "'n' must been int and >= 1"
_SCREAMING_SNAKE_CASE : Any = [] # will be returned.
for divisor in range(1, n + 1 ):
if n % divisor == 0:
ans.append(_lowerCamelCase )
# precondition
assert ans[0] == 1 and ans[len(_lowerCamelCase ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and (
number > 1
), "'number' must been an int and >= 1"
_SCREAMING_SNAKE_CASE : Optional[int] = get_divisors(_lowerCamelCase )
# precondition
assert (
isinstance(_lowerCamelCase, _lowerCamelCase )
and (divisors[0] == 1)
and (divisors[len(_lowerCamelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
assert (
isinstance(_lowerCamelCase, _lowerCamelCase )
and isinstance(_lowerCamelCase, _lowerCamelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_SCREAMING_SNAKE_CASE : Tuple = gcd(abs(_lowerCamelCase ), abs(_lowerCamelCase ) )
# precondition
assert (
isinstance(_lowerCamelCase, _lowerCamelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and (n >= 0), "'n' must been a int and >= 0"
_SCREAMING_SNAKE_CASE : Tuple = 1 # this will be returned.
for factor in range(1, n + 1 ):
ans *= factor
return ans
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(_lowerCamelCase, _lowerCamelCase ) and (n >= 0), "'n' must been an int and >= 0"
_SCREAMING_SNAKE_CASE : Any = 0
_SCREAMING_SNAKE_CASE : Tuple = 1
_SCREAMING_SNAKE_CASE : str = 1 # this will be returned
for _ in range(n - 1 ):
_SCREAMING_SNAKE_CASE : Any = ans
ans += fiba
_SCREAMING_SNAKE_CASE : Any = tmp
return ans
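# Illustrative expected values for the helpers above (function names are
# obfuscated in this snippet; conventional names are used here for reference):
# is_prime(97) -> True
# prime_factorization(60) -> [2, 2, 3, 5]
# greatest_prime_factor(60) -> 5, smallest_prime_factor(60) -> 2
# gcd(24, 36) -> 12, kg_v(8, 10) -> 40 (least common multiple)
# fib(10) -> 55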
| 363
|
from __future__ import annotations
import typing
from collections import Counter
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : typing.Counter[int] = Counter()
for base in range(1, max_perimeter + 1 ):
for perpendicular in range(__lowerCamelCase, max_perimeter + 1 ):
_SCREAMING_SNAKE_CASE : List[Any] = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def lowerCamelCase__ (__lowerCamelCase = 1000 ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = pythagorean_triple(__lowerCamelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
| 325
| 0
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
__snake_case = 42
__snake_case = None
__snake_case = None
UpperCamelCase__ =namedtuple('CoinsDistribResult', 'moves excess')
def lowerCamelCase__ (__lowerCamelCase ):
if root is None:
return 0
# Validation
def count_nodes(__lowerCamelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__lowerCamelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(a_ ) != count_coins(a_ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(__lowerCamelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0, 1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = get_distrib(node.left )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = get_distrib(node.right )
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - left_distrib_excess
_SCREAMING_SNAKE_CASE : Optional[int] = 1 - right_distrib_excess
_SCREAMING_SNAKE_CASE : str = (
left_distrib_moves
+ right_distrib_moves
+ abs(a_ )
+ abs(a_ )
)
_SCREAMING_SNAKE_CASE : List[str] = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(a_, a_ )
return get_distrib(a_ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
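# Illustrative case (this is LeetCode 979, "Distribute Coins in Binary Tree"):
# a root holding 3 coins with two empty children needs 2 moves, one coin
# pushed down each edge, so the function returns 2 for that tree.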
| 364
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
# ===== initialization =====
_SCREAMING_SNAKE_CASE : List[Any] = Mock()
_SCREAMING_SNAKE_CASE : Optional[Any] = conn, Mock()
_SCREAMING_SNAKE_CASE : Dict = iter([1, None] )
_SCREAMING_SNAKE_CASE : Optional[Any] = lambda __lowerCamelCase : next(__lowerCamelCase )
# ===== invoke =====
send_file(filename="mytext.txt", testing=__lowerCamelCase )
# ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 325
| 0
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class lowerCAmelCase__:
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=6_4 , __lowerCamelCase=3_2 , __lowerCamelCase=5 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = parent
_SCREAMING_SNAKE_CASE : int = batch_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
_SCREAMING_SNAKE_CASE : Dict = is_training
_SCREAMING_SNAKE_CASE : List[Any] = use_input_mask
_SCREAMING_SNAKE_CASE : str = use_token_type_ids
_SCREAMING_SNAKE_CASE : List[str] = use_labels
_SCREAMING_SNAKE_CASE : Tuple = vocab_size
_SCREAMING_SNAKE_CASE : List[Any] = hidden_size
_SCREAMING_SNAKE_CASE : Dict = embedding_size
_SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Tuple = intermediate_size
_SCREAMING_SNAKE_CASE : Any = hidden_act
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Any = max_position_embeddings
_SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
_SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
_SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
_SCREAMING_SNAKE_CASE : List[str] = num_choices
_SCREAMING_SNAKE_CASE : Union[str, Any] = scope
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Dict = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : Any = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE : Tuple = None
_SCREAMING_SNAKE_CASE : str = None
_SCREAMING_SNAKE_CASE : str = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self ) -> Optional[Any]:
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = MobileBertModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE : Dict = model(_a , attention_mask=_a , token_type_ids=_a )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(_a , token_type_ids=_a )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[int] = MobileBertForMaskedLM(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE : int = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any:
_SCREAMING_SNAKE_CASE : Any = MobileBertForNextSentencePrediction(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[int] = MobileBertForPreTraining(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE : Optional[int] = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , next_sentence_label=_a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : List[Any] = MobileBertForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
_SCREAMING_SNAKE_CASE : Any = self.num_labels
_SCREAMING_SNAKE_CASE : Optional[int] = MobileBertForSequenceClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE : int = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : int = self.num_labels
_SCREAMING_SNAKE_CASE : str = MobileBertForTokenClassification(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE : int = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.num_choices
_SCREAMING_SNAKE_CASE : Tuple = MobileBertForMultipleChoice(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE : Optional[int] = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE
) : Any = config_and_inputs
_SCREAMING_SNAKE_CASE : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
__snake_case = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
__snake_case = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case = True
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> int:
_SCREAMING_SNAKE_CASE : Tuple = super()._prepare_for_class(_a , _a , return_labels=_a )
if return_labels:
if model_class in get_values(_a ):
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_a )
_SCREAMING_SNAKE_CASE : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_a )
return inputs_dict
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Optional[int] = MobileBertModelTester(self )
_SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=_a , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> Dict:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_a )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_a )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_a )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_a )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_a )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_a )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_a )
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_a )
def lowerCamelCase__ (__lowerCamelCase ):
return torch.tensor(
__a, dtype=torch.long, device=__a, )
UpperCamelCase__ =1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(_a )
_SCREAMING_SNAKE_CASE : int = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Tuple = model(_a )[0]
_SCREAMING_SNAKE_CASE : List[Any] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape , _a )
_SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[
[
[-2.4_736_526E07, 8.2_691_656E04, 1.6_521_838E05],
[-5.7_541_704E-01, 3.9_056_022E00, 4.4_011_507E00],
[2.6_047_359E00, 1.5_677_652E00, -1.7_324_188E-01],
]
] , device=_a , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# difference of ~1, so it is not a good idea to measure closeness using addition.
# Instead, we divide the expected result by the actual result in order to obtain a ratio of ~1. We then check
# that this ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
_SCREAMING_SNAKE_CASE : str = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
_SCREAMING_SNAKE_CASE : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
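# A minimal standalone sketch of the ratio-based tolerance check used in the
# test above, useful when values span many orders of magnitude (names are
# illustrative; assumes `torch` is available as imported above):
def _within_relative_tolerance(expected, actual, tol=1E-3):
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tol) and torch.all(ratio <= 1 + tol))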
| 365
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['image_processor', 'tokenizer']
__snake_case = 'BlipImageProcessor'
__snake_case = 'AutoTokenizer'
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
_SCREAMING_SNAKE_CASE : List[str] = qformer_tokenizer
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = True , __lowerCamelCase = False , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = True , __lowerCamelCase = None , **__lowerCamelCase , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
_SCREAMING_SNAKE_CASE : Any = BatchFeature()
if text is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : str = qformer_text_encoding.pop("input_ids" )
_SCREAMING_SNAKE_CASE : List[Any] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> str:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCamelCase_ ( self , __lowerCamelCase , **__lowerCamelCase ) -> Any:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
_SCREAMING_SNAKE_CASE : Optional[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
return cls(*__lowerCamelCase )
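# Hypothetical usage sketch (the class names in this snippet are obfuscated
# and the checkpoint name is an assumption):
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=image, text="What is shown in this picture?", return_tensors="pt")
# `inputs` then carries input_ids/attention_mask from the language tokenizer,
# qformer_input_ids/qformer_attention_mask from the Q-Former tokenizer, and
# pixel_values from the image processor.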
| 325
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCAmelCase__( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__snake_case = "wavlm"
def __init__( self , __lowerCamelCase=3_2 , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.02 , __lowerCamelCase=1E-5 , __lowerCamelCase="group" , __lowerCamelCase="gelu" , __lowerCamelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , __lowerCamelCase=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCamelCase=False , __lowerCamelCase=1_2_8 , __lowerCamelCase=1_6 , __lowerCamelCase=3_2_0 , __lowerCamelCase=8_0_0 , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.05 , __lowerCamelCase=1_0 , __lowerCamelCase=2 , __lowerCamelCase=0.0 , __lowerCamelCase=1_0 , __lowerCamelCase=3_2_0 , __lowerCamelCase=2 , __lowerCamelCase=0.1 , __lowerCamelCase=1_0_0 , __lowerCamelCase=2_5_6 , __lowerCamelCase=2_5_6 , __lowerCamelCase=0.1 , __lowerCamelCase="mean" , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=2_5_6 , __lowerCamelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , __lowerCamelCase=(5, 3, 3, 1, 1) , __lowerCamelCase=(1, 2, 3, 1, 1) , __lowerCamelCase=5_1_2 , __lowerCamelCase=8_0 , __lowerCamelCase=0 , __lowerCamelCase=1 , __lowerCamelCase=2 , __lowerCamelCase=False , __lowerCamelCase=3 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]:
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
_SCREAMING_SNAKE_CASE : int = hidden_size
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm
_SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_activation
_SCREAMING_SNAKE_CASE : List[str] = list(__UpperCAmelCase )
_SCREAMING_SNAKE_CASE : Tuple = list(__UpperCAmelCase )
_SCREAMING_SNAKE_CASE : List[str] = list(__UpperCAmelCase )
_SCREAMING_SNAKE_CASE : List[str] = conv_bias
_SCREAMING_SNAKE_CASE : Any = num_buckets
_SCREAMING_SNAKE_CASE : List[Any] = max_bucket_distance
_SCREAMING_SNAKE_CASE : List[Any] = num_conv_pos_embeddings
_SCREAMING_SNAKE_CASE : Optional[Any] = num_conv_pos_embedding_groups
_SCREAMING_SNAKE_CASE : List[Any] = len(self.conv_dim )
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[str] = intermediate_size
_SCREAMING_SNAKE_CASE : Tuple = hidden_act
_SCREAMING_SNAKE_CASE : str = num_attention_heads
_SCREAMING_SNAKE_CASE : Tuple = hidden_dropout
_SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
_SCREAMING_SNAKE_CASE : Any = activation_dropout
_SCREAMING_SNAKE_CASE : Optional[int] = feat_proj_dropout
_SCREAMING_SNAKE_CASE : List[Any] = final_dropout
_SCREAMING_SNAKE_CASE : str = layerdrop
_SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[Any] = num_ctc_classes
_SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
_SCREAMING_SNAKE_CASE : Optional[int] = do_stable_layer_norm
_SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum
_SCREAMING_SNAKE_CASE : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_SCREAMING_SNAKE_CASE : Dict = apply_spec_augment
_SCREAMING_SNAKE_CASE : Union[str, Any] = mask_time_prob
_SCREAMING_SNAKE_CASE : List[str] = mask_time_length
_SCREAMING_SNAKE_CASE : Union[str, Any] = mask_time_min_masks
_SCREAMING_SNAKE_CASE : Dict = mask_feature_prob
_SCREAMING_SNAKE_CASE : Any = mask_feature_length
# parameters for pretraining with codevector quantized representations
_SCREAMING_SNAKE_CASE : int = num_codevectors_per_group
_SCREAMING_SNAKE_CASE : Tuple = num_codevector_groups
_SCREAMING_SNAKE_CASE : List[str] = contrastive_logits_temperature
_SCREAMING_SNAKE_CASE : List[str] = num_negatives
_SCREAMING_SNAKE_CASE : Any = codevector_dim
_SCREAMING_SNAKE_CASE : str = proj_codevector_dim
_SCREAMING_SNAKE_CASE : Any = diversity_loss_weight
# ctc loss
_SCREAMING_SNAKE_CASE : Dict = ctc_loss_reduction
_SCREAMING_SNAKE_CASE : List[str] = ctc_zero_infinity
# adapter
_SCREAMING_SNAKE_CASE : Optional[Any] = add_adapter
_SCREAMING_SNAKE_CASE : List[Any] = adapter_kernel_size
_SCREAMING_SNAKE_CASE : int = adapter_stride
_SCREAMING_SNAKE_CASE : List[Any] = num_adapter_layers
_SCREAMING_SNAKE_CASE : List[str] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_SCREAMING_SNAKE_CASE : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_SCREAMING_SNAKE_CASE : List[Any] = list(__UpperCAmelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = list(__UpperCAmelCase )
_SCREAMING_SNAKE_CASE : Dict = list(__UpperCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = xvector_output_dim
@property
def UpperCamelCase_ ( self ) -> Any:
return functools.reduce(operator.mul , self.conv_stride , 1 )
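# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the property above
# evaluates to 5 * 2**6 = 320, i.e. the feature extractor emits one frame per
# 320 input samples (20 ms of audio at a 16 kHz sampling rate).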
| 366
|
from maths.prime_check import is_prime
def lowerCamelCase__ (__lowerCamelCase ):
if not isinstance(__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__lowerCamelCase )
if is_prime(__lowerCamelCase ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
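# e.g. an input of 3 returns 5 (3 and 5 are twin primes), while an input of 4
# returns -1 because 4 is not prime.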
| 325
| 0
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# load base model
_SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionPipeline.from_pretrained(a_, torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
_SCREAMING_SNAKE_CASE : List[str] = load_file(a_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = []
# directly update weight in diffusers model
for key in state_dict:
# it can help to print out the key; it usually looks something like:
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# alpha has already been applied beforehand, so just skip these entries
if ".alpha" in key or key in visited:
continue
if "text" in key:
_SCREAMING_SNAKE_CASE : Tuple = key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" )
_SCREAMING_SNAKE_CASE : int = pipeline.text_encoder
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = key.split("." )[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" )
_SCREAMING_SNAKE_CASE : Tuple = pipeline.unet
# find the target layer
_SCREAMING_SNAKE_CASE : Union[str, Any] = layer_infos.pop(0 )
while len(a_ ) > -1:
try:
_SCREAMING_SNAKE_CASE : List[Any] = curr_layer.__getattr__(a_ )
if len(a_ ) > 0:
_SCREAMING_SNAKE_CASE : List[str] = layer_infos.pop(0 )
elif len(a_ ) == 0:
break
except Exception:
if len(a_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
_SCREAMING_SNAKE_CASE : str = layer_infos.pop(0 )
_SCREAMING_SNAKE_CASE : Dict = []
if "lora_down" in key:
pair_keys.append(key.replace("lora_down", "lora_up" ) )
pair_keys.append(a_ )
else:
pair_keys.append(a_ )
pair_keys.append(key.replace("lora_up", "lora_down" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
_SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
_SCREAMING_SNAKE_CASE : Dict = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a_, a_ ).unsqueeze(2 ).unsqueeze(3 )
else:
_SCREAMING_SNAKE_CASE : List[str] = state_dict[pair_keys[0]].to(torch.floataa )
_SCREAMING_SNAKE_CASE : Optional[Any] = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a_, a_ )
# update visited list
for item in pair_keys:
visited.append(a_ )
return pipeline
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
UpperCamelCase__ =parser.parse_args()
UpperCamelCase__ =args.base_model_path
UpperCamelCase__ =args.checkpoint_path
UpperCamelCase__ =args.dump_path
UpperCamelCase__ =args.lora_prefix_unet
UpperCamelCase__ =args.lora_prefix_text_encoder
UpperCamelCase__ =args.alpha
UpperCamelCase__ =convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
UpperCamelCase__ =pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
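# Hypothetical invocation of this conversion script (the script name and all
# paths below are placeholders, not taken from the snippet):
# python convert_lora_safetensor_to_diffusers.py \
#     --base_model_path runwayml/stable-diffusion-v1-5 \
#     --checkpoint_path ./lora_weights.safetensors \
#     --dump_path ./merged-pipeline \
#     --alpha 0.75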
| 367
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase__ (__lowerCamelCase ):
return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code )
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=__lowerCamelCase , default=__lowerCamelCase , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=__lowerCamelCase , help="Name of the model to download" )
download_parser.set_defaults(func=__lowerCamelCase )
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Any = model
_SCREAMING_SNAKE_CASE : Optional[int] = cache
_SCREAMING_SNAKE_CASE : str = force
_SCREAMING_SNAKE_CASE : str = trust_remote_code
def UpperCamelCase_ ( self ) -> Optional[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 325
| 0
|
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
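    # Sanity check: 4150 is one of the fixed points, since
    # 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
    assert digits_fifth_powers_sum(4150) == 4150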
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_short_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
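    # No-match case returns an empty list; worst case is O((n - m + 1) * m)
    # character comparisons for text length n and pattern length m.
    assert naive_pattern_search("ABCDEFG", "XY") == []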
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'If a class of 40 students must be arranged into groups of',
f"4 for group projects, there are {combinations(40, 4)} ways",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f"are {combinations(10, 3)} ways that first, second and",
'third place can be awarded.',
)
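    # Cross-check against the closed form C(n, k) = n! / (k! * (n - k)!):
    # a standard 52-card deck gives C(52, 5) = 2598960 five-card hands.
    assert combinations(52, 5) == 2598960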
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # The student-side key names below follow DistilBERT's parameter layout
    # (q_lin, k_lin, v_lin, out_lin, sa_layer_norm, ffn.lin1/lin2, ...); verify
    # them against your transformers version if it differs.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
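
    # Example invocation (hypothetical output path; adjust to your setup):
    #   python extract_distilbert.py --model_type bert --model_name bert-base-uncased \
    #       --dump_checkpoint serialization_dir/distilbert_init.pth --vocab_transform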
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
    ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(
        self,
        hidden_states,
        key_value_states=None,
        attention_mask=None,
    ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # gated GELU: one projection is passed through the activation, the
        # other stays linear, and the two are multiplied elementwise
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
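
# In formula form, RMSNorm computes y = w * x / sqrt(mean(x**2) + eps): unlike
# standard LayerNorm there is no mean subtraction and no learned bias term.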
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # tanh approximation of GELU, as used in the original BERT/GPT code
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
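
# FiLM (feature-wise linear modulation) conditions activations with an affine
# transform predicted from the conditioning embedding, out = x * (1 + scale) + shift,
# where `scale` and `shift` are the two halves of a single linear projection.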
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={'vocab_file': 'sentencepiece.model'}
UpperCamelCase__ ={
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
UpperCamelCase__ ={
'google/rembert': 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute the current row from the previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


if __name__ == "__main__":
    print(binomial_coefficient(n=10, r=5))
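    # C(10, 5) = 252; the right-to-left update keeps a single row of Pascal's
    # triangle, giving O(n * r) time and O(r) space.
    assert binomial_coefficient(n=10, r=5) == 252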
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[str] = ["的", "人", "有"]
_SCREAMING_SNAKE_CASE : Any = "".join(lowercase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_SCREAMING_SNAKE_CASE : Tuple = True
_SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
_SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
_SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_p.encode(lowercase_ , add_special_tokens=lowercase_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.encode(lowercase_ , add_special_tokens=lowercase_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.convert_ids_to_tokens(lowercase_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.convert_ids_to_tokens(lowercase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
_SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
_SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.encode(lowercase_ , add_special_tokens=lowercase_ )
_SCREAMING_SNAKE_CASE : str = tokenizer_p.encode(lowercase_ , add_special_tokens=lowercase_ )
_SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(lowercase_ )
_SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(lowercase_ )
# it is expected that only the first Chinese character is not preceded by "##".
_SCREAMING_SNAKE_CASE : Any = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowercase_ )
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
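
# Illustrative sketch (not part of the test file above): the "##" prefix behaviour
# exercised by the Chinese-character tests can be emulated with a toy helper.
# toy_tokenize is hypothetical and exists only for this demonstration.
def toy_tokenize(text: str, tokenize_chinese_chars: bool) -> list:
    chars = [c for c in text if not c.isspace()]
    if tokenize_chinese_chars:
        # Each CJK character becomes its own token, so no token carries a "##" prefix.
        return chars
    # Otherwise the string is treated as a single word and split into word pieces,
    # so every piece after the first is prefixed with "##".
    return [c if i == 0 else f"##{c}" for i, c in enumerate(chars)]


assert toy_tokenize("的人有", tokenize_chinese_chars=True) == ["的", "人", "有"]
assert toy_tokenize("的人有", tokenize_chinese_chars=False) == ["的", "##人", "##有"]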
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCamelCase__ =logging.getLogger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ) -> Optional[Any]:
super().__init__(
__lowerCamelCase , question_encoder_tokenizer=__lowerCamelCase , generator_tokenizer=__lowerCamelCase , index=__lowerCamelCase , init_retrieval=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
_SCREAMING_SNAKE_CASE : List[str] = self._infer_socket_ifname()
# avoid clash with the NCCL port
_SCREAMING_SNAKE_CASE : List[Any] = str(distributed_port + 1 )
_SCREAMING_SNAKE_CASE : int = dist.new_group(ranks=__lowerCamelCase , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCamelCase_ ( self ) -> Optional[Any]:
return dist.get_rank(group=self.process_group ) == 0
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=torch.floataa ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = torch.empty(__lowerCamelCase , dtype=__lowerCamelCase )
dist.scatter(__lowerCamelCase , src=0 , scatter_list=__lowerCamelCase , group=self.process_group )
return target_tensor
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_SCREAMING_SNAKE_CASE : Any = next((addr for addr in addrs if addr.startswith("e" )) , __lowerCamelCase )
return ifname
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self._main_retrieve(__lowerCamelCase , __lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCamelCase )
# distributed training
_SCREAMING_SNAKE_CASE : Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
_SCREAMING_SNAKE_CASE : Any = None
if self._is_main():
_SCREAMING_SNAKE_CASE : Optional[Any] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowerCamelCase )]
dist.gather(torch.tensor(__lowerCamelCase ) , dst=0 , gather_list=__lowerCamelCase , group=self.process_group )
# scatter logic
_SCREAMING_SNAKE_CASE : Optional[int] = question_hidden_states.shape[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : Optional[int] = []
if self._is_main():
assert len(__lowerCamelCase ) == world_size
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self._main_retrieve(torch.cat(__lowerCamelCase ).numpy() , __lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self._scattered(__lowerCamelCase , [n_queries, n_docs] , target_type=torch.intaa )
_SCREAMING_SNAKE_CASE : Optional[Any] = self._scattered(__lowerCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCamelCase )
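
# Single-process sketch of the chunk-then-scatter bookkeeping used by retrieve above:
# the main worker retrieves for the concatenated queries of every worker, then splits
# the result back into per-worker chunks before dist.scatter. chunk_tensor below is a
# hypothetical stand-in for the retriever's _chunk_tensor helper.
def chunk_tensor(t: torch.Tensor, chunk_size: int) -> list:
    # Split along dim 0 into pieces of at most chunk_size rows, one per worker.
    return list(torch.split(t, chunk_size, dim=0))


world_size, n_queries_per_worker, hidden = 4, 2, 8
gathered = torch.randn(world_size * n_queries_per_worker, hidden)
chunks = chunk_tensor(gathered, n_queries_per_worker)
assert len(chunks) == world_size
assert all(c.shape == (n_queries_per_worker, hidden) for c in chunks)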
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
UpperCamelCase__ ="""src/transformers"""
UpperCamelCase__ ="""docs/source/en"""
UpperCamelCase__ ="""."""
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
with open(_snake_case, "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : List[Any] = f.readlines()
# Find the start prompt.
_SCREAMING_SNAKE_CASE : Dict = 0
while not lines[start_index].startswith(_snake_case ):
start_index += 1
start_index += 1
_SCREAMING_SNAKE_CASE : Optional[int] = start_index
while not lines[end_index].startswith(_snake_case ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
UpperCamelCase__ ="""Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
UpperCamelCase__ =re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
UpperCamelCase__ =re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
UpperCamelCase__ =re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ =direct_transformers_import(TRANSFORMERS_PATH)
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : str = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", _snake_case )
return [m.group(0 ) for m in matches]
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = 2 if text == '''✅''' or text == '''❌''' else len(_snake_case )
_SCREAMING_SNAKE_CASE : str = (width - text_length) // 2
_SCREAMING_SNAKE_CASE : List[str] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_SCREAMING_SNAKE_CASE : Optional[int] = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
}
_SCREAMING_SNAKE_CASE : Union[str, Any] = {name: config.replace("Config", "" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_SCREAMING_SNAKE_CASE : List[Any] = collections.defaultdict(_snake_case )
_SCREAMING_SNAKE_CASE : List[Any] = collections.defaultdict(_snake_case )
_SCREAMING_SNAKE_CASE : Optional[Any] = collections.defaultdict(_snake_case )
_SCREAMING_SNAKE_CASE : Optional[int] = collections.defaultdict(_snake_case )
_SCREAMING_SNAKE_CASE : Any = collections.defaultdict(_snake_case )
# Let's lookup through all transformers object (once).
for attr_name in dir(_snake_case ):
_SCREAMING_SNAKE_CASE : List[str] = None
if attr_name.endswith("Tokenizer" ):
_SCREAMING_SNAKE_CASE : Tuple = slow_tokenizers
_SCREAMING_SNAKE_CASE : Union[str, Any] = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_SCREAMING_SNAKE_CASE : List[Any] = fast_tokenizers
_SCREAMING_SNAKE_CASE : Optional[int] = attr_name[:-13]
elif _re_tf_models.match(_snake_case ) is not None:
_SCREAMING_SNAKE_CASE : List[Any] = tf_models
_SCREAMING_SNAKE_CASE : int = _re_tf_models.match(_snake_case ).groups()[0]
elif _re_flax_models.match(_snake_case ) is not None:
_SCREAMING_SNAKE_CASE : Tuple = flax_models
_SCREAMING_SNAKE_CASE : Optional[int] = _re_flax_models.match(_snake_case ).groups()[0]
elif _re_pt_models.match(_snake_case ) is not None:
_SCREAMING_SNAKE_CASE : Optional[Any] = pt_models
_SCREAMING_SNAKE_CASE : List[Any] = _re_pt_models.match(_snake_case ).groups()[0]
if lookup_dict is not None:
while len(_snake_case ) > 0:
if attr_name in model_name_to_prefix.values():
_SCREAMING_SNAKE_CASE : Optional[int] = True
break
# Try again after removing the last word in the name
_SCREAMING_SNAKE_CASE : Dict = ''''''.join(camel_case_split(_snake_case )[:-1] )
# Let's build that table!
_SCREAMING_SNAKE_CASE : int = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_SCREAMING_SNAKE_CASE : List[Any] = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_SCREAMING_SNAKE_CASE : int = [len(_snake_case ) + 2 for c in columns]
_SCREAMING_SNAKE_CASE : List[Any] = max([len(_snake_case ) for name in model_names] ) + 2
# Build the table per se
_SCREAMING_SNAKE_CASE : Optional[int] = '''|''' + '''|'''.join([_center_text(_snake_case, _snake_case ) for c, w in zip(_snake_case, _snake_case )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_SCREAMING_SNAKE_CASE : Optional[int] = {True: '''✅''', False: '''❌'''}
for name in model_names:
_SCREAMING_SNAKE_CASE : Optional[Any] = model_name_to_prefix[name]
_SCREAMING_SNAKE_CASE : Optional[Any] = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(_snake_case, _snake_case ) for l, w in zip(_snake_case, _snake_case )] ) + "|\n"
return table
def lowerCamelCase__ (__lowerCamelCase=False ):
_SCREAMING_SNAKE_CASE : int = _find_text_in_file(
filename=os.path.join(_snake_case, "index.md" ), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", )
_SCREAMING_SNAKE_CASE : Optional[Any] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(_snake_case, "index.md" ), "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ =parser.parse_args()
check_model_table(args.fix_and_overwrite)
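
# Quick illustration of the two helpers above, independent of the repo layout.
# camel_case_split_demo and center_text_demo are restatements for demonstration only.
def camel_case_split_demo(identifier: str) -> list:
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


assert camel_case_split_demo("MobileBertForMaskedLM") == ["Mobile", "Bert", "For", "Masked", "LM"]


def center_text_demo(text: str, width: int) -> str:
    left_indent = (width - len(text)) // 2
    return " " * left_indent + text + " " * (width - len(text) - left_indent)


assert center_text_demo("ok", 6) == "  ok  "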
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'timesformer'
def __init__( self , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=8 , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.02 , __lowerCamelCase=1E-6 , __lowerCamelCase=True , __lowerCamelCase="divided_space_time" , __lowerCamelCase=0 , **__lowerCamelCase , ) -> List[str]:
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = image_size
_SCREAMING_SNAKE_CASE : str = patch_size
_SCREAMING_SNAKE_CASE : str = num_channels
_SCREAMING_SNAKE_CASE : str = num_frames
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : Any = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
_SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
_SCREAMING_SNAKE_CASE : List[str] = qkv_bias
_SCREAMING_SNAKE_CASE : Tuple = attention_type
_SCREAMING_SNAKE_CASE : Union[str, Any] = drop_path_rate
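
# Back-of-envelope check of the geometry implied by the defaults above
# (image_size=224, patch_size=16, num_frames=8): how many space-time patch tokens
# the video transformer attends over. Purely illustrative arithmetic.
image_size, patch_size, num_frames = 224, 16, 8
patches_per_frame = (image_size // patch_size) ** 2  # 14 * 14 = 196
num_patch_tokens = patches_per_frame * num_frames  # 196 * 8 = 1568 (plus one CLS token)
assert (patches_per_frame, num_patch_tokens) == (196, 1568)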
import math
import unittest
def lowerCamelCase__ (__lowerCamelCase ):
    assert isinstance(number, int ) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> str:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
        with self.assertRaises(AssertionError ):
is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) , "Zero doesn\'t have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
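
# Standalone restatement of the 6k +/- 1 trial division above, to make the loop
# bound explicit: testing divisors up to sqrt(n) is sufficient. Demonstration only.
def is_prime_sketch(n: int) -> bool:
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(n)) + 1, 6):
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True


assert [x for x in range(30) if is_prime_sketch(x)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]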
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={'vocab_file': 'spiece.model'}
UpperCamelCase__ ={
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
UpperCamelCase__ ={
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCamelCase__ ='▁'
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase="[CLS]" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<unk>" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<pad>" , __lowerCamelCase="[CLS]" , __lowerCamelCase="[MASK]" , __lowerCamelCase = None , **__lowerCamelCase , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text, so there should be a match in a non-normalized sentence.
_SCREAMING_SNAKE_CASE : List[Any] = (
AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase , normalized=__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase )
else mask_token
)
_SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Dict = do_lower_case
_SCREAMING_SNAKE_CASE : List[Any] = remove_space
_SCREAMING_SNAKE_CASE : str = keep_accents
_SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
_SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return len(self.sp_model )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.__dict__.copy()
_SCREAMING_SNAKE_CASE : Optional[Any] = None
return state
def __setstate__( self , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_SCREAMING_SNAKE_CASE : Optional[int] = {}
_SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]:
if self.remove_space:
_SCREAMING_SNAKE_CASE : List[str] = " ".join(inputs.strip().split() )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = inputs
_SCREAMING_SNAKE_CASE : str = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
_SCREAMING_SNAKE_CASE : str = unicodedata.normalize("NFKD" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] )
if self.do_lower_case:
_SCREAMING_SNAKE_CASE : Dict = outputs.lower()
return outputs
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = self.preprocess_text(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = []
for piece in pieces:
if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_SCREAMING_SNAKE_CASE : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_SCREAMING_SNAKE_CASE : Union[str, Any] = cur_pieces[1:]
else:
_SCREAMING_SNAKE_CASE : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCamelCase )
else:
new_pieces.append(__lowerCamelCase )
return new_pieces
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[Any]:
return self.sp_model.PieceToId(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> str:
return self.sp_model.IdToPiece(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : List[str] = ""
_SCREAMING_SNAKE_CASE : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase ) + token
_SCREAMING_SNAKE_CASE : str = True
_SCREAMING_SNAKE_CASE : Optional[Any] = []
else:
current_sub_tokens.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = False
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1]
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
_SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id]
_SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
_SCREAMING_SNAKE_CASE : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
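
# Sanity sketch of the special-token layout produced by the two methods above:
# a single sequence becomes [CLS] A [SEP]; a pair becomes [CLS] A [SEP] B [SEP],
# with segment ids 0 for the first block and 1 for the second. Toy ids only.
cls_id, sep_id = 2, 3
ids_a, ids_b = [10, 11], [20]
pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
segment_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
assert pair == [2, 10, 11, 3, 20, 3]
assert segment_ids == [0, 0, 0, 0, 1, 1]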
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 0, __lowerCamelCase = 0 ):
_SCREAMING_SNAKE_CASE : Any = right or len(__lowerCamelCase ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(__lowerCamelCase, __lowerCamelCase, left + 1, right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
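
# Readable restatement of the recursive two-ended search above, for demonstration:
# it compares the key against both ends and shrinks the window from both sides.
def search_sketch(list_data: list, key, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    if list_data[left] == key:
        return left
    if list_data[right] == key:
        return right
    return search_sketch(list_data, key, left + 1, right - 1)


assert search_sketch([1, 4, 7, 9], 7) == 2
assert search_sketch([1, 4, 7, 9], 5) == -1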
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase__ =logging.get_logger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase ) -> None:
warnings.warn(
"The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use SegformerImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BertTokenizer
__snake_case = BertTokenizerFast
__snake_case = True
__snake_case = True
__snake_case = filter_non_english
def UpperCamelCase_ ( self ) -> Optional[Any]:
super().setUp()
_SCREAMING_SNAKE_CASE : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : str = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE : Tuple = """unwanted, running"""
return input_text, output_text
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def UpperCamelCase_ ( self ) -> List[str]:
if not self.test_rust_tokenizer:
return
_SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Dict = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE : List[str] = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : str = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# With lower casing
_SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : str = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Tuple = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Dict = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE : Any = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : Any = BasicTokenizer()
_SCREAMING_SNAKE_CASE : Any = """a\n'll !!to?'d of, can't."""
_SCREAMING_SNAKE_CASE : Dict = ["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""]
self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_SCREAMING_SNAKE_CASE : int = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE : str = i
_SCREAMING_SNAKE_CASE : Dict = WordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def UpperCamelCase_ ( self ) -> Any:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def UpperCamelCase_ ( self ) -> Any:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def UpperCamelCase_ ( self ) -> Tuple:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Dict = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained("bert-base-uncased" )
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode("sequence builders" , add_special_tokens=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Dict = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : int = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def UpperCamelCase_ ( self ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , )
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , "do_lower_case" ) else False
_SCREAMING_SNAKE_CASE : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """Allen"""),
((2_1, 2_3), """##NL"""),
((2_3, 2_4), """##P"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """allen"""),
((2_1, 2_3), """##nl"""),
((2_3, 2_4), """##p"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["""的""", """人""", """有"""]
_SCREAMING_SNAKE_CASE : Tuple = """""".join(SCREAMING_SNAKE_CASE_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_SCREAMING_SNAKE_CASE : List[Any] = True
_SCREAMING_SNAKE_CASE : Any = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
# it is expected that only the first Chinese character is not preceded by "##".
_SCREAMING_SNAKE_CASE : Tuple = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_ )
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
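
# Greedy longest-match-first word-piece lookup, the behaviour the Wordpiece test
# above exercises ("unwanted" -> un ##want ##ed). A compact sketch, not library code.
def wordpiece_sketch(word: str, vocab: set, unk: str = "[UNK]") -> list:
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:
            return [unk]  # no prefix of the remaining characters is in the vocab
        start = end
    return pieces


toy_vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece_sketch("unwanted", toy_vocab) == ["un", "##want", "##ed"]
assert wordpiece_sketch("unwantedx", toy_vocab) == ["[UNK]"]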
import numpy as np
import datasets
UpperCamelCase__ ='\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
UpperCamelCase__ ='\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
UpperCamelCase__ ='\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> int:
# convert to numpy arrays
_SCREAMING_SNAKE_CASE : Dict = np.array(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
_SCREAMING_SNAKE_CASE : Any = X - np.mean(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.cov(reference_distribution.T )
try:
_SCREAMING_SNAKE_CASE : Optional[int] = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
_SCREAMING_SNAKE_CASE : List[str] = np.linalg.pinv(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowerCAmelCase__( __UpperCamelCase ):
'''simple docstring'''
__snake_case = 42
__snake_case = 42
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
__snake_case = 42
__snake_case = (1_6, 3_2, 9_6, 2_5_6)
__snake_case = jnp.floataa
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_SCREAMING_SNAKE_CASE : int = []
for i in range(len(self.block_out_channels ) - 1 ):
_SCREAMING_SNAKE_CASE : Optional[Any] = self.block_out_channels[i]
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.block_out_channels[i + 1]
_SCREAMING_SNAKE_CASE : Dict = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = blocks
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __lowerCamelCase ) -> Any:
_SCREAMING_SNAKE_CASE : Dict = self.conv_in(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = nn.silu(__lowerCamelCase )
for block in self.blocks:
_SCREAMING_SNAKE_CASE : str = block(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = nn.silu(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.conv_out(__lowerCamelCase )
return embedding
@flax_register_to_config
class lowerCAmelCase__( nn.Module , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__snake_case = 3_2
__snake_case = 4
__snake_case = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__snake_case = False
__snake_case = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
__snake_case = 2
__snake_case = 8
__snake_case = None
__snake_case = 1_2_8_0
__snake_case = 0.0
__snake_case = False
__snake_case = jnp.floataa
__snake_case = True
__snake_case = 0
__snake_case = "rgb"
__snake_case = (1_6, 3_2, 9_6, 2_5_6)
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Union[str, Any]:
# init input tensors
_SCREAMING_SNAKE_CASE : List[str] = (1, self.in_channels, self.sample_size, self.sample_size)
_SCREAMING_SNAKE_CASE : Tuple = jnp.zeros(__lowerCamelCase , dtype=jnp.floataa )
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.ones((1,) , dtype=jnp.intaa )
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
_SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.zeros(__lowerCamelCase , dtype=jnp.floataa )
_SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["params"]
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.block_out_channels
_SCREAMING_SNAKE_CASE : Dict = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_SCREAMING_SNAKE_CASE : int = self.num_attention_heads or self.attention_head_dim
# input
_SCREAMING_SNAKE_CASE : Dict = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_SCREAMING_SNAKE_CASE : List[Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_SCREAMING_SNAKE_CASE : List[Any] = FlaxTimestepEmbedding(__lowerCamelCase , dtype=self.dtype )
_SCREAMING_SNAKE_CASE : str = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
_SCREAMING_SNAKE_CASE : Any = self.only_cross_attention
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = (num_attention_heads,) * len(self.down_block_types )
# down
_SCREAMING_SNAKE_CASE : List[Any] = []
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0]
_SCREAMING_SNAKE_CASE : int = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
for i, down_block_type in enumerate(self.down_block_types ):
_SCREAMING_SNAKE_CASE : int = output_channel
_SCREAMING_SNAKE_CASE : int = block_out_channels[i]
_SCREAMING_SNAKE_CASE : int = i == len(__lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_SCREAMING_SNAKE_CASE : Optional[Any] = FlaxCrossAttnDownBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownBlockaD(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__lowerCamelCase )
for _ in range(self.layers_per_block ):
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
if not is_final_block:
_SCREAMING_SNAKE_CASE : str = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = down_blocks
_SCREAMING_SNAKE_CASE : List[str] = controlnet_down_blocks
# mid
_SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[-1]
_SCREAMING_SNAKE_CASE : str = FlaxUNetMidBlockaDCrossAttn(
in_channels=__lowerCamelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
_SCREAMING_SNAKE_CASE : str = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 1.0 , __lowerCamelCase = True , __lowerCamelCase = False , ) -> Tuple:
_SCREAMING_SNAKE_CASE : str = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_SCREAMING_SNAKE_CASE : int = jnp.flip(__lowerCamelCase , axis=1 )
# 1. time
if not isinstance(__lowerCamelCase , jnp.ndarray ):
_SCREAMING_SNAKE_CASE : str = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
_SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.floataa )
_SCREAMING_SNAKE_CASE : Any = jnp.expand_dims(__lowerCamelCase , 0 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.time_proj(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = self.time_embedding(__lowerCamelCase )
# 2. pre-process
_SCREAMING_SNAKE_CASE : List[Any] = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
_SCREAMING_SNAKE_CASE : Tuple = self.conv_in(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
_SCREAMING_SNAKE_CASE : Dict = self.controlnet_cond_embedding(__lowerCamelCase )
sample += controlnet_cond
# 3. down
_SCREAMING_SNAKE_CASE : Optional[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : str = down_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
else:
_SCREAMING_SNAKE_CASE : str = down_block(__lowerCamelCase , __lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
_SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
        # 5. controlnet blocks
_SCREAMING_SNAKE_CASE : int = ()
for down_block_res_sample, controlnet_block in zip(__lowerCamelCase , self.controlnet_down_blocks ):
_SCREAMING_SNAKE_CASE : str = controlnet_block(__lowerCamelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
_SCREAMING_SNAKE_CASE : Any = controlnet_down_block_res_samples
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.controlnet_mid_block(__lowerCamelCase )
# 6. scaling
_SCREAMING_SNAKE_CASE : Union[str, Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__lowerCamelCase , mid_block_res_sample=__lowerCamelCase )
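
# Small sketch of the timestep normalization in __call__ above: a Python int or a
# 0-d array is promoted to a 1-d batch before the sinusoidal time projection.
# normalize_timesteps is a hypothetical helper for illustration only.
def normalize_timesteps(timesteps):
    if not isinstance(timesteps, jnp.ndarray):
        timesteps = jnp.array([timesteps], dtype=jnp.int32)
    elif len(timesteps.shape) == 0:
        timesteps = jnp.expand_dims(timesteps.astype(jnp.float32), 0)
    return timesteps


assert normalize_timesteps(7).shape == (1,)
assert normalize_timesteps(jnp.array(7)).shape == (1,)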
from __future__ import annotations
import math
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__lowerCamelCase ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1, node_index * 2, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), minimax(depth + 1, node_index * 2 + 1, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), )
return min(
minimax(depth + 1, node_index * 2, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), minimax(depth + 1, node_index * 2 + 1, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), )
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : Union[str, Any] = [90, 23, 6, 33, 21, 65, 123, 34423]
_SCREAMING_SNAKE_CASE : Tuple = math.log(len(__lowerCamelCase ), 2 )
print("Optimal value : ", end="" )
print(minimax(0, 0, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
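
# Worked check of the minimax recursion above on a depth-2 tree: with scores
# [3, 5, 2, 9] and height log2(4) = 2, the maximizer gets
# max(min(3, 5), min(2, 9)) = 3. Standalone restatement for the check.
def minimax_sketch(depth, node_index, is_max, scores, height):
    if depth == height:
        return scores[node_index]
    children = (
        minimax_sketch(depth + 1, node_index * 2, not is_max, scores, height),
        minimax_sketch(depth + 1, node_index * 2 + 1, not is_max, scores, height),
    )
    return max(children) if is_max else min(children)


assert minimax_sketch(0, 0, True, [3, 5, 2, 9], math.log(4, 2)) == 3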
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase = " " ):
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : Optional[Any] = 0
    for index, char in enumerate(string ):
if char == separator:
split_words.append(string[last_index:index] )
_SCREAMING_SNAKE_CASE : Any = index + 1
        elif index + 1 == len(string ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
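
# Example behaviour of the separator split above (a standalone restatement, for
# demonstration; note that a trailing separator does not emit an empty final word):
def split_sketch(string: str, separator: str = " ") -> list:
    split_words, last_index = [], 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


assert split_sketch("apple#banana#cherry", "#") == ["apple", "banana", "cherry"]
assert split_sketch("Hello there") == ["Hello", "there"]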
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase__ ='src/diffusers'
UpperCamelCase__ ='.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase__ =importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase__ =spec.loader.load_module()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
return line.startswith(__lowerCamelCase ) or len(__lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", __lowerCamelCase ) is not None
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = object_name.split("." )
_SCREAMING_SNAKE_CASE : List[Any] = 0
# First let's find the module where our object lives.
_SCREAMING_SNAKE_CASE : Any = parts[i]
while i < len(__lowerCamelCase ) and not os.path.isfile(os.path.join(__lowerCamelCase, f"""{module}.py""" ) ):
i += 1
if i < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase, parts[i] )
if i >= len(__lowerCamelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__lowerCamelCase, f"""{module}.py""" ), "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
# Now let's find the class / func in the code!
_SCREAMING_SNAKE_CASE : Union[str, Any] = ""
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCamelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_SCREAMING_SNAKE_CASE : Optional[int] = line_index
while line_index < len(__lowerCamelCase ) and _should_continue(lines[line_index], __lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : Optional[int] = lines[start_index:line_index]
return "".join(__lowerCamelCase )
UpperCamelCase__ =re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCamelCase__ =re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCamelCase__ =re.compile(R'<FILL\s+[^>]*>')
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = code.split("\n" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while idx < len(__lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCamelCase ):
return re.search(R"^(\s*)\S", lines[idx] ).groups()[0]
return ""
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = len(get_indent(__lowerCamelCase ) ) > 0
if has_indent:
_SCREAMING_SNAKE_CASE : Union[str, Any] = f"""class Bla:\n{code}"""
_SCREAMING_SNAKE_CASE : Any = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True )
_SCREAMING_SNAKE_CASE : List[Any] = black.format_str(code, mode=mode )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = style_docstrings_in_code(result )
return result[len("class Bla:\n" ) :] if has_indent else result
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False ):
with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Tuple = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lines ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = search.groups()
_SCREAMING_SNAKE_CASE : Any = find_code_in_diffusers(object_name )
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_indent(theoretical_code )
_SCREAMING_SNAKE_CASE : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
_SCREAMING_SNAKE_CASE : int = theoretical_indent
_SCREAMING_SNAKE_CASE : str = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_SCREAMING_SNAKE_CASE : Any = True
while line_index < len(lines ) and should_continue:
line_index += 1
if line_index >= len(lines ):
break
_SCREAMING_SNAKE_CASE : Union[str, Any] = lines[line_index]
_SCREAMING_SNAKE_CASE : str = _should_continue(line, indent ) and re.search(f"""^{indent}# End copy""", line ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : List[Any] = lines[start_index:line_index]
_SCREAMING_SNAKE_CASE : Optional[Any] = "".join(__lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
_SCREAMING_SNAKE_CASE : Dict = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(line ) is None]
_SCREAMING_SNAKE_CASE : str = "\n".join(theoretical_code )
# Before comparing, use the `replace_pattern` on the original code.
if len(replace_pattern ) > 0:
_SCREAMING_SNAKE_CASE : str = replace_pattern.replace("with", "" ).split("," )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [_re_replace_pattern.search(p ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = pattern.groups()
_SCREAMING_SNAKE_CASE : Tuple = re.sub(obja, objb, theoretical_code )
if option.strip() == "all-casing":
_SCREAMING_SNAKE_CASE : List[Any] = re.sub(obja.lower(), objb.lower(), theoretical_code )
_SCREAMING_SNAKE_CASE : Any = re.sub(obja.upper(), objb.upper(), theoretical_code )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_SCREAMING_SNAKE_CASE : int = blackify(lines[start_index - 1] + theoretical_code )
_SCREAMING_SNAKE_CASE : List[str] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
_SCREAMING_SNAKE_CASE : int = start_index + 1
if overwrite and len(diffs ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(__lowerCamelCase )
return diffs
def lowerCamelCase__ (__lowerCamelCase = False ):
_SCREAMING_SNAKE_CASE : int = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py" ), recursive=True )
_SCREAMING_SNAKE_CASE : Any = []
for filename in all_files:
_SCREAMING_SNAKE_CASE : int = is_copy_consistent(filename, overwrite )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(diffs ) > 0:
_SCREAMING_SNAKE_CASE : Dict = "\n".join(diffs )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ =parser.parse_args()
check_copies(args.fix_and_overwrite)
| 325
| 0
|
import argparse
import json
import subprocess
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = []
_SCREAMING_SNAKE_CASE : Optional[int] = (
f"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
_SCREAMING_SNAKE_CASE : Optional[int] = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE )
_SCREAMING_SNAKE_CASE : Tuple = output.stdout.decode("utf-8" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = json.loads(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(runner )
# save the result so we can report them on Slack
with open("offline_runners.txt", "w" ) as fp:
fp.write(json.dumps(offline_runners ) )
if len(offline_runners ) > 0:
_SCREAMING_SNAKE_CASE : int = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(f"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
def lowerCamelCase__ (__lowerCamelCase ):
return values.split("," )
UpperCamelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
UpperCamelCase__ =parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 358
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ) -> Any:
_SCREAMING_SNAKE_CASE : str = parent
_SCREAMING_SNAKE_CASE : List[Any] = 1_3
_SCREAMING_SNAKE_CASE : List[str] = 7
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : int = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : int = 9_9
_SCREAMING_SNAKE_CASE : str = 3_8_4
_SCREAMING_SNAKE_CASE : List[Any] = 2
_SCREAMING_SNAKE_CASE : Dict = 4
_SCREAMING_SNAKE_CASE : Dict = 3_7
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gelu"
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : List[Any] = 5_1_2
_SCREAMING_SNAKE_CASE : Tuple = 1_6
_SCREAMING_SNAKE_CASE : Dict = 2
_SCREAMING_SNAKE_CASE : Any = 0.02
_SCREAMING_SNAKE_CASE : Any = 3
_SCREAMING_SNAKE_CASE : List[str] = 4
_SCREAMING_SNAKE_CASE : List[Any] = 1_2_8
_SCREAMING_SNAKE_CASE : Optional[int] = 2
_SCREAMING_SNAKE_CASE : int = 9
_SCREAMING_SNAKE_CASE : List[str] = 1
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE : List[Any] = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : str = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = TFConvBertForMaskedLM(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.num_labels
_SCREAMING_SNAKE_CASE : str = TFConvBertForSequenceClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices
_SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForMultipleChoice(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = self.num_labels
_SCREAMING_SNAKE_CASE : Tuple = TFConvBertForTokenClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : Optional[int] = TFConvBertForQuestionAnswering(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
(
_SCREAMING_SNAKE_CASE ,
_SCREAMING_SNAKE_CASE ,
_SCREAMING_SNAKE_CASE ,
_SCREAMING_SNAKE_CASE ,
_SCREAMING_SNAKE_CASE ,
_SCREAMING_SNAKE_CASE ,
_SCREAMING_SNAKE_CASE ,
) : List[Any] = config_and_inputs
_SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__snake_case = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = TFConvBertModelTester(self )
_SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
if hasattr(__lowerCamelCase , "use_cache" ):
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(model(__lowerCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname , saved_model=True )
_SCREAMING_SNAKE_CASE : Dict = os.path.join(tmpdirname , "saved_model" , "1" )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.models.load_model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : List[Any] = outputs["encoder_hidden_states"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs["encoder_attentions"]
else:
_SCREAMING_SNAKE_CASE : List[str] = outputs["hidden_states"]
_SCREAMING_SNAKE_CASE : Dict = outputs["attentions"]
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , "key_length" , __lowerCamelCase )
def check_decoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(__lowerCamelCase )
self.assertEqual(out_len % 2 , 0 )
_SCREAMING_SNAKE_CASE : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(config )
_SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(inputs_dict , model_class ) )
_SCREAMING_SNAKE_CASE : Any = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : Tuple = model_class(config )
_SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(inputs_dict , model_class ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[Any] = model_class(config )
_SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(inputs_dict , model_class ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Optional[int] = model_class(config )
_SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(inputs_dict , model_class ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
_SCREAMING_SNAKE_CASE : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 )
| 325
| 0
|
def lowerCamelCase__ (__lowerCamelCase ):
if n == 1 or not isinstance(n, int ):
return 0
elif n == 2:
return 1
else:
_SCREAMING_SNAKE_CASE : Tuple = [0, 1]
for i in range(2, n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = 0
_SCREAMING_SNAKE_CASE : Optional[int] = 2
while digits < n:
index += 1
_SCREAMING_SNAKE_CASE : int = len(str(fibonacci(index ) ) )
return index
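# A quick sanity check (standard Fibonacci values): 144 = fibonacci(12) is the first
# three-digit term of the sequence as indexed above, so three digits maps to index 12.
assert fibonacci_digits_index(3) == 12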
def lowerCamelCase__ (__lowerCamelCase = 1000 ):
return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 359
|
from timeit import timeit
def lowerCamelCase__ (__lowerCamelCase ):
if number < 0:
raise ValueError("the value of input must not be negative" )
_SCREAMING_SNAKE_CASE : str = 0
while number:
number &= number - 1
result += 1
return result
def lowerCamelCase__ (__lowerCamelCase ):
if number < 0:
raise ValueError("the value of input must not be negative" )
_SCREAMING_SNAKE_CASE : str = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
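# A minimal sanity check of both counters above: 25 is 0b11001, i.e. three set bits,
# and the two implementations should agree on it.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3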
def lowerCamelCase__ ():
def do_benchmark(__lowerCamelCase ) -> None:
_SCREAMING_SNAKE_CASE : Tuple = "import __main__ as z"
print(f"""Benchmark when {number = }:""" )
print(f"""{get_set_bits_count_using_modulo_operator(__lowerCamelCase ) = }""" )
_SCREAMING_SNAKE_CASE : str = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=__lowerCamelCase )
print(f"""timeit() runs in {timing} seconds""" )
print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(__lowerCamelCase ) = }""" )
_SCREAMING_SNAKE_CASE : int = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=__lowerCamelCase, )
print(f"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(number )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 325
| 0
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase__ ="""platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class lowerCAmelCase__:
'''simple docstring'''
__snake_case = PegasusConfig
__snake_case = {}
__snake_case = 'gelu'
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=5 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=2_0 , __lowerCamelCase=2 , __lowerCamelCase=1 , __lowerCamelCase=0 , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = parent
_SCREAMING_SNAKE_CASE : Optional[int] = batch_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
_SCREAMING_SNAKE_CASE : Tuple = is_training
_SCREAMING_SNAKE_CASE : Dict = use_labels
_SCREAMING_SNAKE_CASE : int = vocab_size
_SCREAMING_SNAKE_CASE : List[str] = hidden_size
_SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
_SCREAMING_SNAKE_CASE : int = num_attention_heads
_SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
_SCREAMING_SNAKE_CASE : List[Any] = pad_token_id
_SCREAMING_SNAKE_CASE : List[str] = bos_token_id
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE : int = np.concatenate([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_SCREAMING_SNAKE_CASE : Any = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = 2_0
_SCREAMING_SNAKE_CASE : List[Any] = model_class_name(config )
_SCREAMING_SNAKE_CASE : Optional[int] = model.encode(inputs_dict["input_ids"] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_SCREAMING_SNAKE_CASE : Tuple = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
_SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE : str = model.decode(
decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
_SCREAMING_SNAKE_CASE : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_SCREAMING_SNAKE_CASE : Dict = model.decode(
decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
_SCREAMING_SNAKE_CASE : List[Any] = model.decode(decoder_input_ids , encoder_outputs )
_SCREAMING_SNAKE_CASE : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Any = 2_0
_SCREAMING_SNAKE_CASE : List[str] = model_class_name(config )
_SCREAMING_SNAKE_CASE : Tuple = model.encode(inputs_dict["input_ids"] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_SCREAMING_SNAKE_CASE : Dict = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_SCREAMING_SNAKE_CASE : str = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
_SCREAMING_SNAKE_CASE : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE : List[str] = model.decode(
decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
_SCREAMING_SNAKE_CASE : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_SCREAMING_SNAKE_CASE : Optional[Any] = model.decode(
decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
_SCREAMING_SNAKE_CASE : Optional[Any] = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
_SCREAMING_SNAKE_CASE : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, ):
if attention_mask is None:
_SCREAMING_SNAKE_CASE : Any = np.not_equal(input_ids, config.pad_token_id ).astype(np.int8 )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8 ),
np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.int8 ),
], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class lowerCAmelCase__( __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__snake_case = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__snake_case = True
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxPegasusModelTester(self )
_SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=PegasusConfig )
def UpperCamelCase_ ( self ) -> Dict:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(inputs_dict , model_class )
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(config )
@jax.jit
def encode_jitted(__lowerCamelCase , __lowerCamelCase=None , **__lowerCamelCase ):
return model.encode(input_ids=input_ids , attention_mask=attention_mask )
with self.subTest("JIT Enabled" ):
_SCREAMING_SNAKE_CASE : Any = encode_jitted(**prepared_inputs_dict ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE : Dict = encode_jitted(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(jitted_outputs ) , len(outputs ) )
for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE : str = model_class(config )
_SCREAMING_SNAKE_CASE : Optional[int] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
_SCREAMING_SNAKE_CASE : str = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
return model.decode(
decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
with self.subTest("JIT Enabled" ):
_SCREAMING_SNAKE_CASE : Any = decode_jitted(**prepared_inputs_dict ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE : Optional[int] = decode_jitted(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(jitted_outputs ) , len(outputs ) )
for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCamelCase_ ( self ) -> str:
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Tuple = model_class_name.from_pretrained("google/pegasus-large" , from_pt=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.ones((1, 1) )
_SCREAMING_SNAKE_CASE : List[str] = model(input_ids )
self.assertIsNotNone(outputs )
@slow
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : str = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
_SCREAMING_SNAKE_CASE : Optional[Any] = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
_SCREAMING_SNAKE_CASE : str = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
_SCREAMING_SNAKE_CASE : List[str] = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
_SCREAMING_SNAKE_CASE : List[str] = tokenizer(src_text , return_tensors="np" , truncation=True , max_length=5_1_2 , padding=True )
_SCREAMING_SNAKE_CASE : List[str] = model.generate(**inputs , num_beams=2 ).sequences
_SCREAMING_SNAKE_CASE : str = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
assert tgt_text == decoded
| 360
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ ={
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =[
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 325
| 0
|
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = CLIPConfig
__snake_case = ['CLIPEncoderLayer']
def __init__( self , __lowerCamelCase ) -> Dict:
super().__init__(config )
_SCREAMING_SNAKE_CASE : List[str] = CLIPVisionModelWithProjection(config.vision_config )
_SCREAMING_SNAKE_CASE : Dict = nn.Linear(config.vision_config.projection_dim , 1 )
_SCREAMING_SNAKE_CASE : Tuple = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.5 , __lowerCamelCase=0.5 ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.vision_model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = self.p_head(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = nsfw_detected.flatten()
_SCREAMING_SNAKE_CASE : Union[str, Any] = nsfw_detected > p_threshold
_SCREAMING_SNAKE_CASE : List[Any] = nsfw_detected.tolist()
if any(nsfw_detected ):
logger.warning(
"Potential NSFW content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, nsfw_detected_ in enumerate(nsfw_detected ):
if nsfw_detected_:
_SCREAMING_SNAKE_CASE : Any = np.zeros(images[idx].shape )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.w_head(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = watermark_detected.flatten()
_SCREAMING_SNAKE_CASE : Union[str, Any] = watermark_detected > w_threshold
_SCREAMING_SNAKE_CASE : List[str] = watermark_detected.tolist()
if any(watermark_detected ):
logger.warning(
"Potential watermarked content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, watermark_detected_ in enumerate(watermark_detected ):
if watermark_detected_:
_SCREAMING_SNAKE_CASE : int = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 361
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
UpperCamelCase__ =np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCamelCase__ =[0, 25, 50]
UpperCamelCase__ =[25, 50, 75]
UpperCamelCase__ =fuzz.membership.trimf(X, abca)
UpperCamelCase__ =fuzz.membership.trimf(X, abcb)
# Compute the different operations using inbuilt functions.
UpperCamelCase__ =np.ones(75)
UpperCamelCase__ =np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCamelCase__ =fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCamelCase__ =fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
UpperCamelCase__ =fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCamelCase__ =fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
UpperCamelCase__ =young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCamelCase__ =young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
UpperCamelCase__ =fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
UpperCamelCase__ =fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
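# A small sanity check of the operations above (always true for these definitions):
# the fuzzy union can never fall below the fuzzy intersection at any point of X.
assert np.all(union >= intersection)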
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 325
| 0
|
from PIL import Image
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = (259 * (level + 255)) / (255 * (259 - level))
def contrast(__lowerCamelCase ) -> int:
return int(128 + factor * (c - 128) )
return img.point(contrast )
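# A quick sanity check of the contrast factor above (no image needed): at level 0 the
# factor is 259 * 255 / (255 * 259) == 1.0, i.e. the identity mapping, and mid-gray
# 128 is a fixed point of `contrast` at every level.
assert (259 * (0 + 255)) / (255 * (259 - 0)) == 1.0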
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
UpperCamelCase__ =change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 362
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['vqvae']
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]:
super().__init__()
self.register_modules(unet=unet , scheduler=scheduler , mel=mel , vqvae=vqvae )
def UpperCamelCase_ ( self ) -> int:
return 5_0 if isinstance(self.scheduler , DDIMScheduler ) else 1_0_0_0
@torch.no_grad()
def __call__( self , __lowerCamelCase = 1 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
_SCREAMING_SNAKE_CASE : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(steps )
_SCREAMING_SNAKE_CASE : Dict = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_SCREAMING_SNAKE_CASE : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=generator , device=self.device , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = noise
_SCREAMING_SNAKE_CASE : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(audio_file , raw_audio )
_SCREAMING_SNAKE_CASE : Dict = self.mel.audio_slice_to_image(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
_SCREAMING_SNAKE_CASE : Optional[int] = (input_image / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(input_image , 0 ) ).latent_dist.sample(
generator=generator )[0]
_SCREAMING_SNAKE_CASE : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.add_noise(input_images , noise , self.scheduler.timesteps[start_step - 1] )
_SCREAMING_SNAKE_CASE : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_SCREAMING_SNAKE_CASE : Optional[Any] = int(mask_start_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[int] = int(mask_end_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.add_noise(input_images , noise , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UNetaDConditionModel ):
_SCREAMING_SNAKE_CASE : List[str] = self.unet(images , t , __lowerCamelCase )["sample"]
else:
_SCREAMING_SNAKE_CASE : str = self.unet(images , t )["sample"]
if isinstance(self.scheduler , DDIMScheduler ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(
model_output=model_output , timestep=t , sample=images , eta=eta , generator=step_generator , )["prev_sample"]
else:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step(
model_output=model_output , timestep=t , sample=images , generator=step_generator , )["prev_sample"]
if mask is not None:
if mask_start > 0:
_SCREAMING_SNAKE_CASE : str = mask[:, step, :, :mask_start]
if mask_end > 0:
_SCREAMING_SNAKE_CASE : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_SCREAMING_SNAKE_CASE : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images
_SCREAMING_SNAKE_CASE : Dict = self.vqvae.decode(__lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_SCREAMING_SNAKE_CASE : List[str] = (images * 2_5_5).round().astype("uint8" )
_SCREAMING_SNAKE_CASE : Tuple = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__lowerCamelCase , mode="RGB" ).convert("L" ) for _ in images) )
_SCREAMING_SNAKE_CASE : Tuple = [self.mel.image_to_audio(__lowerCamelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(audios )[:, np.newaxis, :] ) , **ImagePipelineOutput(images ) )
@torch.no_grad()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 5_0 ) -> np.ndarray:
assert isinstance(self.scheduler , DDIMScheduler )
self.scheduler.set_timesteps(steps )
_SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (sample / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : Any = torch.Tensor(sample ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_SCREAMING_SNAKE_CASE : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.alphas_cumprod[t]
_SCREAMING_SNAKE_CASE : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t
_SCREAMING_SNAKE_CASE : Optional[int] = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_SCREAMING_SNAKE_CASE : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_SCREAMING_SNAKE_CASE : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCamelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> torch.Tensor:
_SCREAMING_SNAKE_CASE : Any = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
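# A minimal standalone sketch (hypothetical unit vectors) of the spherical interpolation
# used above: halfway between two orthonormal vectors, the result stays on the unit sphere.
_xa = torch.tensor([1.0, 0.0])
_xb = torch.tensor([0.0, 1.0])
_theta = acos(torch.dot(_xa, _xb) / torch.norm(_xa) / torch.norm(_xb))
_mid = sin(0.5 * _theta) * _xa / sin(_theta) + sin(0.5 * _theta) * _xb / sin(_theta)
assert torch.allclose(torch.norm(_mid), torch.tensor(1.0), atol=1E-6)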
| 325
| 0
|
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = IFImgaImgSuperResolutionPipeline
__snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
__snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
__snake_case = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCamelCase_ ( self ) -> Union[str, Any]:
return self._get_superresolution_dummy_components()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=0 ) -> int:
if str(_a ).startswith("mps" ):
_SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE : int = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase_ ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase_ ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCamelCase_ ( self ) -> Optional[int]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase_ ( self ) -> Optional[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase_ ( self ) -> List[str]:
self._test_save_load_local()
def UpperCamelCase_ ( self ) -> Tuple:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 363
|
from __future__ import annotations
import typing
from collections import Counter
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : typing.Counter[int] = Counter()
for base in range(1, max_perimeter + 1 ):
for perpendicular in range(__lowerCamelCase, max_perimeter + 1 ):
_SCREAMING_SNAKE_CASE : List[Any] = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
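# A quick sanity check (classic 3-4-5 triangle): with a maximum perimeter of 15, the
# only Pythagorean triplet found has perimeter 3 + 4 + 5 = 12, counted exactly once.
assert pythagorean_triple(15)[12] == 1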
def lowerCamelCase__ (__lowerCamelCase = 1000 ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = pythagorean_triple(__lowerCamelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
| 325
| 0
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: swap coordinates so x0 <= x1 and y0 <= y1 for every box
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        # the model must run with and without the optional attention_mask / token_type_ids
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
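    # every pipeline test is skipped for this model: the hook below returns True unconditionally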
    def is_pipeline_test_to_skip(self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
| 364
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 325
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ =["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 365
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            # tokenize the same text a second time for the Q-Former branch
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 325
| 0
|
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ =False, False, False
@dataclass
class Audio:
'''simple docstring'''
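    # user-facing decoding options; the Arrow storage type is declared below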
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Audio', init=False, repr=False)
def __call__( self ) -> int:
return self.pa_type
    def encode_example(self, value) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don't have to do "read file, make bytes" (just use it!)
                    # scale 16-bit PCM samples to floats in [-1.0, 1.0]
                    bytes_ = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_ = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value, token_per_repo_id=None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        if file is None:
            # the path may be a chained URL ("a::b"); resolve the hub repo id to pick the right auth token
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
def UpperCamelCase_ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
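    # cast arbitrary user-supplied Arrow columns (strings, binaries, structs) into the {bytes, path} struct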
    def cast_storage(self, storage) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage) -> pa.StructArray:
        # materialize file paths into bytes so the resulting table is self-contained
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 366
|
from maths.prime_check import is_prime
def twin_prime(number):
    # return number + 2 if (number, number + 2) form a twin prime pair, else -1
    if not isinstance(number, int):
        error_msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(error_msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325
| 0
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = 'MCTCTFeatureExtractor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # allow passing the audio as the first positional argument
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.)"
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 367
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 325
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ =logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # split the fused qkv projection into separate query / key / value tensors
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.query.bias"""] = val[:dim]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)
# load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
# Check outputs on an image, prepared by YolosImageProcessor
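    # the tiny variant uses a 512-pixel image size; the other checkpoints use 800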
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f"""Unknown yolos_name: {yolos_name}""" )
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path)
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
        model_mapping = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 368
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
'''simple docstring'''
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1E-3)
def prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    # fill in any masks the caller did not provide with sensible defaults
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
    src_text = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
    model_name = 'facebook/blenderbot_small-90M'
@cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
@cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
@slow
    def test_90_generation_from_long_input(self):
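        # 2-beam generation on a long input must reproduce one of the known reference answers below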
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 325
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.vocab_size
_SCREAMING_SNAKE_CASE : List[Any] = len(_a )
self.assertNotEqual(_a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_SCREAMING_SNAKE_CASE : int = ["aaaaa bbbbbb", "cccccccccdddddddd"]
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.add_tokens(_a )
_SCREAMING_SNAKE_CASE : Tuple = tokenizer.vocab_size
_SCREAMING_SNAKE_CASE : Optional[int] = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size + len(_a ) )
_SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_SCREAMING_SNAKE_CASE : Tuple = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
_SCREAMING_SNAKE_CASE : Tuple = tokenizer.add_special_tokens(_a )
_SCREAMING_SNAKE_CASE : Any = tokenizer.vocab_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size_a + len(_a ) )
_SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def UpperCamelCase_ ( self ) -> str:
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
pass
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(_a , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , )
_SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
_SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_ids(_a )
# fmt: off
self.assertListEqual(_a , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6] )
# fmt: on
_SCREAMING_SNAKE_CASE : int = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
    def test_tokenizer_integration(self):
# Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
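        # expected batch encodings for the three sentences above; trailing 1s are <pad> (id 1)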
        expected_encoding = {
"input_ids": [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=sequences , )
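        # For reference, `tokenizer_integration_test_util` (from the shared tokenizer
        # test mixin) roughly performs the following -- a sketch, not the exact
        # implementation:
        #
        #   tokenizer = self.tokenizer_class.from_pretrained(model_name, revision=revision)
        #   encoding = tokenizer(sequences, padding=True)
        #   assert encoding["input_ids"] == expected_encoding["input_ids"]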
from math import isqrt, log2


def calculate_prime_numbers(max_number):
    """Sieve of Eratosthenes: return all primes strictly below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # mark every multiple of i as composite, stepping by i
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base=800800, degree=800800):
    """Count hybrid integers p**q * q**p <= base**degree for distinct primes p, q."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    # Two-pointer scan: for each left prime, shrink right until the pair fits,
    # then every prime between left and right pairs with prime_numbers[left].
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
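
# Cross-check for small inputs (a sketch, not part of the original solution): a
# quadratic scan over prime pairs should agree with the two-pointer count above,
# since q*log2(p) + p*log2(q) <= upper_bound is monotone in q for fixed p.
def _brute_force_count(base, degree):
    upper_bound = degree * log2(base)
    primes = calculate_prime_numbers(int(upper_bound))
    return sum(
        1
        for i, p in enumerate(primes)
        for q in primes[i + 1 :]
        if q * log2(p) + p * log2(q) <= upper_bound
    )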
if __name__ == "__main__":
print(f"{solution() = }")
def create_ngram(sentence, ngram_size):
    """Return all contiguous character-level n-grams of length ngram_size."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
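
# Example (character-level n-grams):
#
#   create_ngram("hello", 3)  # -> ['hel', 'ell', 'llo']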
if __name__ == "__main__":
from doctest import testmod
testmod()
from math import factorial
def combinations(n, k):
    # If either condition is true, the function would need the factorial of a
    # negative number, which is not defined.
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'If a class of 40 students must be arranged into groups of',
f"4 for group projects, there are {combinations(40, 4)} ways",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f"are {combinations(10, 3)} ways that first, second and",
'third place can be awarded.',
)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase__ =logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase__( BaseImageProcessor ):
    '''simple docstring'''

    model_input_names = ["pixel_values"]

    def __init__( self , do_resize=True , size=None , resample=PILImageResampling.BICUBIC , do_center_crop=True , crop_size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=None , image_std=None , do_convert_rgb=True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize( self , image , size , resample=PILImageResampling.BICUBIC , data_format=None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image , size , data_format=None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format=None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format=None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize=None , size=None , resample=None , do_center_crop=None , crop_size=None , do_rescale=None , rescale_factor=None , do_normalize=None , image_mean=None , image_std=None , do_convert_rgb=None , return_tensors=None , data_format=ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="size" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
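
# Minimal usage sketch (illustrative; any RGB PIL image works, and the class name
# above is the anonymized stand-in for the CLIP-style image processor):
#
#   from PIL import Image
#   processor = lowerCAmelCase__()          # defaults: shortest edge 224, center crop, CLIP mean/std
#   image = Image.new("RGB", (640, 480))
#   batch = processor.preprocess(images=image, return_tensors="np")
#   print(batch["pixel_values"].shape)      # -> (1, 3, 224, 224)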
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCAmelCase__( ModelMixin , ConfigMixin ):
    '''simple docstring'''

    @register_to_config
    def __init__( self , input_dims=128 , targets_length=256 , max_decoder_noise_time=2000.0 , d_model=768 , num_layers=12 , num_heads=12 , d_kv=64 , d_ff=2048 , dropout_rate=0.1 , ) -> None:
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask( self , query_input , key_input ) -> torch.Tensor:
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
    def forward( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ) -> torch.Tensor:
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        spec_out = self.decoder_norm(y )
        spec_out = self.post_dropout(spec_out )
        spec_out = self.spec_out(spec_out )
        return spec_out
class DecoderLayer(nn.Module ):
    '''simple docstring'''

    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1e-6 ) -> None:
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )

    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ) -> tuple:
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_decoder_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_decoder_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module ):
    '''simple docstring'''

    def __init__( self , d_model , d_kv , num_heads , dropout_rate ) -> None:
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )

    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , ) -> torch.Tensor:
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attention_output )
        return hidden_states
class TaLayerCrossAttention(nn.Module ):
    '''simple docstring'''

    def __init__( self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ) -> None:
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )

    def forward( self , hidden_states , key_value_states=None , attention_mask=None , ) -> torch.Tensor:
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class TaLayerFFCond(nn.Module ):
    '''simple docstring'''

    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon ) -> None:
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )

    def forward( self , hidden_states , conditioning_emb=None ) -> torch.Tensor:
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class TaDenseGatedActDense(nn.Module ):
    '''simple docstring'''

    def __init__( self , d_model , d_ff , dropout_rate ) -> None:
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()

    def forward( self , hidden_states ) -> torch.Tensor:
        # Gated-GELU feed-forward: GELU(wi_0(x)) gates wi_1(x), then project back.
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm(nn.Module ):
    '''simple docstring'''

    def __init__( self , hidden_size , eps=1e-6 ) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps

    def forward( self , hidden_states ) -> torch.Tensor:
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class NewGELUActivation(nn.Module ):
    '''simple docstring'''

    def forward( self , input ) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044715 * torch.pow(input , 3.0 )) ))
class TaFiLMLayer(nn.Module ):
    '''simple docstring'''

    def __init__( self , in_features , out_features ) -> None:
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )

    def forward( self , x , conditioning_emb ) -> torch.Tensor:
        # Chunk the projected conditioning embedding into per-channel scale/shift.
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
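
# Quick shape check for the FiLM layer (illustrative values, not from the source):
#
#   film = TaFiLMLayer(in_features=768 * 4, out_features=768)
#   x = torch.randn(2, 10, 768)
#   cond = torch.randn(2, 1, 768 * 4)
#   assert film(x, cond).shape == x.shape   # scale/shift broadcast over the sequence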
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
    config = SwinvaConfig()
    name_split = swinva_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()
    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"""Saving model {swinva_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name), organization="nandwalritik", commit_message="Add model", )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCamelCase__ =parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
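
# Example invocation (illustrative; the script name follows the usual transformers
# conversion-script convention and the checkpoint is fetched through timm):
#
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256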
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # compute the next row of Pascal's triangle from the previous one, in place
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
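# Expected output: 252, i.e. C(10, 5), computed with O(r) memory via Pascal's rule.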
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day(year, month, day):
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # a year is common unless divisible by 4; century years are leap only
        # when divisible by 400
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
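
# Worked example for 2020-10-24: century = 20 -> century_anchor = 2;
# centurian = 20 -> dooms_day = (1 + 8 + 2 + 2) % 7 = 6; 2020 is a leap year,
# so October's anchor is DOOMSDAY_LEAP[9] = 3 (the 10th, mod 7); finally
# (6 + 24 - 3) % 7 = 6 -> "Saturday".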
if __name__ == "__main__":
import doctest
doctest.testmod()
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCamelCase__ =logging.getLogger(__name__)
class lowerCAmelCase__( RagRetriever ):
'''simple docstring'''
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ) -> None:
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval( self , distributed_port ) -> None:
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1 )
            self.process_group = dist.new_group(ranks=None , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
    def _is_main( self ) -> bool:
return dist.get_rank(group=self.process_group ) == 0
    def _scattered( self , scatter_list , target_shape , target_type=torch.float32 ) -> torch.Tensor:
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
        return target_tensor
    def _infer_socket_ifname( self ) -> str:
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e" )) , None )
        return ifname
    def retrieve( self , question_hidden_states , n_docs ) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
        # distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            ids , vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
            ids , vectors = torch.tensor(ids ) , torch.tensor(vectors )
            scatter_ids = self._chunk_tensor(ids , n_queries )
            scatter_vectors = self._chunk_tensor(vectors , n_queries )
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
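
# Flow summary for the method above: every worker contributes its query embeddings
# via dist.gather to rank 0, rank 0 runs the index search once over the concatenated
# batch, and dist.scatter hands each worker back only the doc ids/embeddings for its
# own rows.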
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _ , layer , param = old_name.split("." )

        if layer == "0":
            new_name = old_name.replace("0", "convolution1" )
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before" )
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2" )
        else:
            new_name = old_name.replace("4", "batchnorm_after" )

    if "network" in old_name and re.search(r"\d\.\d", old_name ):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name ) ):
            match = re.search(r"\d\.\d\d.", old_name ).group()
        else:
            match = re.search(r"\d\.\d.", old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match, "" )
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1] )
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "" )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1" )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2" )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in" )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out" )
            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name ):
        new_name = old_name.replace("network", "intermediate_stages" )

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution" )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before" )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after" )
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection" )
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier" )
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier" )
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm" )
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict , num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
    pixel_values = processor(images=image , return_tensors="pt" ).pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size , interpolation=pillow_resamplings["bicubic"] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN , IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values , pixel_values )
    outputs = model(pixel_values )
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(pytorch_dump_path )
    print(f"""Processor successfully saved at {pytorch_dump_path}""" )
    if push_to_hub:
        print("Pushing model to the hub..." )
        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=True , )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=True , )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
UpperCamelCase__ =parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
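
# Example invocation (illustrative paths; the config JSON ships with the original
# EfficientFormer release):
#
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub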
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__( PretrainedConfig ):
    '''simple docstring'''

    model_type = "timesformer"

    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
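
# Minimal usage sketch (values beyond the defaults are illustrative):
#
#   config = lowerCAmelCase__(num_frames=16, attention_type="divided_space_time")
#   assert config.num_frames == 16 and config.hidden_size == 768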
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    '''simple docstring'''

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=64 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def UpperCamelCase_ ( self ) -> Optional[Any]:
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_mpnet_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MPNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_mpnet_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MPNetForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_mpnet_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_mpnet_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_mpnet_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
__snake_case = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = True
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = MPNetModelTester(self )
_SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCamelCase )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : str = MPNetModel.from_pretrained("microsoft/mpnet-base" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
| 353
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase="[CLS]" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<unk>" , __lowerCamelCase="[SEP]" , __lowerCamelCase="<pad>" , __lowerCamelCase="[CLS]" , __lowerCamelCase="[MASK]" , __lowerCamelCase = None , **__lowerCamelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_SCREAMING_SNAKE_CASE : List[Any] = (
AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase , normalized=__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase )
else mask_token
)
_SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Dict = do_lower_case
_SCREAMING_SNAKE_CASE : List[Any] = remove_space
_SCREAMING_SNAKE_CASE : str = keep_accents
_SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
_SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return len(self.sp_model )
def UpperCamelCase_ ( self ) -> Optional[Any]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def UpperCamelCase_ ( self , text ) -> List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
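        # SentencePiece keeps a trailing comma attached to digits (e.g. "9,"), so the
        # loop below re-encodes such pieces to split the digits from the comma,
        # matching the original ALBERT/XLNet tokenization behaviour.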
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[Any]:
return self.sp_model.PieceToId(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> str:
return self.sp_model.IdToPiece(__lowerCamelCase )
    def UpperCamelCase_ ( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def UpperCamelCase_ ( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
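    # Layout produced above: a single sequence becomes [CLS] A [SEP]; a pair becomes
    # [CLS] A [SEP] B [SEP]. The token type ids built further below mark the first
    # block with 0s and the second with 1s.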
    def UpperCamelCase_ ( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a , token_ids_b , already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def UpperCamelCase_ ( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def UpperCamelCase_ ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 325
| 0
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
UpperCamelCase__ =TypeVar('T')
class lowerCAmelCase__( Generic[T] ):
'''simple docstring'''
__snake_case = 4_2 # Cache store of keys
__snake_case = 4_2 # References of the keys in cache
__snake_case = 1_0 # Maximum capacity of cache
    def __init__( self , n ) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than or equal to 0." )
        else:
            self._MAX_CAPACITY = n
    def UpperCamelCase_ ( self , x ) -> None:
        if x not in self.key_reference:
            if len(self.dq_store ) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def UpperCamelCase_ ( self ) -> None:
        for k in self.dq_store:
            print(k )
def __repr__( self ) -> Any:
return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ =LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
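    # Reference trace: re-referring to 'A' refreshes it, so adding 5 to the full
    # cache (capacity 4) evicts 2, the least recently used key, leaving the store
    # ordered [5, 4, 'A', 3] from most to least recently used.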
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 354
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase__ =logging.get_logger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase ) -> None:
warnings.warn(
"The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use SegformerImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 325
| 0
|
import functools
def lowerCamelCase__ (worda, wordb ):
    len_worda = len(worda )
    len_wordb = len(wordb )
    @functools.cache
    def min_distance(indexa, indexb ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] ) # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb ), 1 + min_distance(indexa, indexb + 1 ), diff + min_distance(indexa + 1, indexb + 1 ), )
    return min_distance(0, 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
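    # Illustrative check: transforming "kitten" into "sitting" takes 3 edits
    # (two substitutions and one insertion).
    assert lowerCamelCase__("kitten", "sitting" ) == 3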
| 355
|
import numpy as np
import datasets
_DESCRIPTION ='\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION ='\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION ='\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
    def UpperCamelCase_ ( self , X , reference_distribution ) -> dict:
        # convert to numpy arrays
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError("Expected `X` to be a 2D vector" )
        if len(reference_distribution.shape ) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector" )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            iv = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            iv = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu , iv )
        mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
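        # mahal_dist holds the squared Mahalanobis distance of each row x of X:
        # d^2(x) = (x - mu)^T . Sigma^{-1} . (x - mu), read off the diagonal of the
        # full product above (mu is the scalar mean of the reference distribution,
        # as implemented here).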
return {"mahalanobis": mahal_dist}
| 325
| 0
|
"""simple docstring"""
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase = " " ):
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : Optional[Any] = 0
for index, char in enumerate(__lowerCamelCase ):
if char == separator:
split_words.append(string[last_index:index] )
_SCREAMING_SNAKE_CASE : List[str] = index + 1
elif index + 1 == len(__lowerCamelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
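    # Illustrative check: splitting on an explicit separator.
    assert lowerCamelCase__("apple#banana#cherry", "#" ) == ["apple", "banana", "cherry"]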
| 356
|
from __future__ import annotations
import math
def minimax(depth, node_index, is_max, scores, height ):
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(scores ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height ), minimax(depth + 1, node_index * 2 + 1, False, scores, height ), )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height ), minimax(depth + 1, node_index * 2 + 1, True, scores, height ), )
def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ), 2 )
    print("Optimal value : ", end="" )
    print(minimax(0, 0, True, scores, height ) )
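    # Prints "Optimal value : 65" for the scores above, assuming the maximiser
    # moves first (is_max=True); with the minimiser first it would print 23.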
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 325
| 0
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> int:
super().tearDown()
gc.collect()
def UpperCamelCase_ ( self ) -> Optional[int]:
        _SCREAMING_SNAKE_CASE : List[str] = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny" , from_pt=True , dtype=jnp.bfloat16 )
        _SCREAMING_SNAKE_CASE : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , controlnet=lowerCamelCase__ , from_pt=True , dtype=jnp.bfloat16 )
_SCREAMING_SNAKE_CASE : Any = controlnet_params
_SCREAMING_SNAKE_CASE : List[str] = '''bird'''
_SCREAMING_SNAKE_CASE : Dict = jax.device_count()
_SCREAMING_SNAKE_CASE : Optional[int] = pipe.prepare_text_inputs([prompts] * num_samples )
_SCREAMING_SNAKE_CASE : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
_SCREAMING_SNAKE_CASE : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
_SCREAMING_SNAKE_CASE : str = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE : Dict = jax.random.split(lowerCamelCase__ , jax.device_count() )
_SCREAMING_SNAKE_CASE : Union[str, Any] = replicate(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : Tuple = shard(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = shard(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt_ids=lowerCamelCase__ , image=lowerCamelCase__ , params=lowerCamelCase__ , prng_seed=lowerCamelCase__ , num_inference_steps=5_0 , jit=lowerCamelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
_SCREAMING_SNAKE_CASE : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_SCREAMING_SNAKE_CASE : Any = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ) -> str:
        _SCREAMING_SNAKE_CASE : str = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose" , from_pt=True , dtype=jnp.bfloat16 )
        _SCREAMING_SNAKE_CASE : str = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , controlnet=lowerCamelCase__ , from_pt=True , dtype=jnp.bfloat16 )
_SCREAMING_SNAKE_CASE : List[str] = controlnet_params
_SCREAMING_SNAKE_CASE : Optional[Any] = '''Chef in the kitchen'''
_SCREAMING_SNAKE_CASE : Tuple = jax.device_count()
_SCREAMING_SNAKE_CASE : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
_SCREAMING_SNAKE_CASE : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
_SCREAMING_SNAKE_CASE : List[str] = pipe.prepare_image_inputs([pose_image] * num_samples )
_SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE : Dict = jax.random.split(lowerCamelCase__ , jax.device_count() )
_SCREAMING_SNAKE_CASE : Any = replicate(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = shard(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : str = shard(lowerCamelCase__ )
_SCREAMING_SNAKE_CASE : Dict = pipe(
prompt_ids=lowerCamelCase__ , image=lowerCamelCase__ , params=lowerCamelCase__ , prng_seed=lowerCamelCase__ , num_inference_steps=5_0 , jit=lowerCamelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
_SCREAMING_SNAKE_CASE : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_SCREAMING_SNAKE_CASE : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_SCREAMING_SNAKE_CASE : str = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 357
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH ='src/diffusers'
REPO_PATH ='.'
# This is to make sure the diffusers module imported is the one in the repo.
spec =importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase__ =spec.loader.load_module()
def _should_continue(line, indent ):
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", line ) is not None
def find_code_in_diffusers(object_name ):
    parts = object_name.split("." )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"""{module}.py""" ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module, parts[i] )
    if i >= len(parts ):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
    with open(os.path.join(DIFFUSERS_PATH, f"""{module}.py""" ), "r", encoding="utf-8", newline="\n" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index], indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning =re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern =re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern =re.compile(R'<FILL\s+[^>]*>')
def get_indent(code ):
    lines = code.split("\n" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R"^(\s*)\S", lines[idx] ).groups()[0]
    return ""
def blackify(code ):
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True )
    result = black.format_str(code, mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("class Bla:\n" ) :] if has_indent else result
def is_copy_consistent(filename, overwrite=False ):
    with open(filename, "r", encoding="utf-8", newline="\n" ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent ) and re.search(f"""^{indent}# End copy""", line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(line ) is None]
        theoretical_code = "\n".join(theoretical_code_lines )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace("with", "" ).split("," )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja , objb , option = pattern.groups()
                theoretical_code = re.sub(obja, objb, theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower(), objb.lower(), theoretical_code )
                    theoretical_code = re.sub(obja.upper(), objb.upper(), theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting {filename}.""" )
        with open(filename, "w", encoding="utf-8", newline="\n" ) as f:
            f.writelines(lines )
    return diffs
def check_copies(overwrite = False ):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py" ), recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite )
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = "\n".join(diffs )
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ =parser.parse_args()
check_copies(args.fix_and_overwrite)
| 325
| 0
|
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class lowerCAmelCase__( lowerCamelCase__ ):
'''simple docstring'''
__snake_case = 'bart'
__snake_case = ['past_key_values']
__snake_case = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __lowerCamelCase=5_0_2_6_5 , __lowerCamelCase=1_0_2_4 , __lowerCamelCase=1_2 , __lowerCamelCase=4_0_9_6 , __lowerCamelCase=1_6 , __lowerCamelCase=1_2 , __lowerCamelCase=4_0_9_6 , __lowerCamelCase=1_6 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase="gelu" , __lowerCamelCase=1_0_2_4 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.02 , __lowerCamelCase=0.0 , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=3 , __lowerCamelCase=1 , __lowerCamelCase=0 , __lowerCamelCase=2 , __lowerCamelCase=True , __lowerCamelCase=2 , __lowerCamelCase=2 , **__lowerCamelCase , ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
_SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
_SCREAMING_SNAKE_CASE : str = d_model
_SCREAMING_SNAKE_CASE : List[Any] = encoder_ffn_dim
_SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
_SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
_SCREAMING_SNAKE_CASE : List[Any] = decoder_ffn_dim
_SCREAMING_SNAKE_CASE : Optional[int] = decoder_layers
_SCREAMING_SNAKE_CASE : Any = decoder_attention_heads
_SCREAMING_SNAKE_CASE : Tuple = dropout
_SCREAMING_SNAKE_CASE : str = attention_dropout
_SCREAMING_SNAKE_CASE : List[Any] = activation_dropout
_SCREAMING_SNAKE_CASE : Optional[int] = activation_function
_SCREAMING_SNAKE_CASE : Union[str, Any] = init_std
_SCREAMING_SNAKE_CASE : Tuple = encoder_layerdrop
_SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
_SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout
_SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
_SCREAMING_SNAKE_CASE : List[str] = encoder_layers
_SCREAMING_SNAKE_CASE : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , **lowercase__ , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , lowercase__ ):
_SCREAMING_SNAKE_CASE : List[Any] = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"The config can simply be saved and uploaded again to be fixed." )
class lowerCAmelCase__( lowerCamelCase__ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
if self.task in ["default", "seq2seq-lm"]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_SCREAMING_SNAKE_CASE : Optional[Any] = {0: "batch"}
_SCREAMING_SNAKE_CASE : List[str] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_SCREAMING_SNAKE_CASE : str = {0: "batch", 1: "decoder_sequence"}
_SCREAMING_SNAKE_CASE : Union[str, Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_SCREAMING_SNAKE_CASE : Dict = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = self.num_layers
for i in range(lowercase__ ):
_SCREAMING_SNAKE_CASE : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"}
_SCREAMING_SNAKE_CASE : str = {0: "batch", 2: "past_sequence + sequence"}
else:
_SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
if self.task in ["default", "seq2seq-lm"]:
_SCREAMING_SNAKE_CASE : int = super().outputs
else:
_SCREAMING_SNAKE_CASE : List[str] = super(lowercase__ , self ).outputs
if self.use_past:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowercase__ ):
_SCREAMING_SNAKE_CASE : int = {0: "batch", 2: "past_sequence + sequence"}
_SCREAMING_SNAKE_CASE : Dict = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = -1 , __lowerCamelCase = -1 , __lowerCamelCase = False , __lowerCamelCase = None , ) -> int:
_SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Generate decoder inputs
_SCREAMING_SNAKE_CASE : str = seq_length if not self.use_past else 1
_SCREAMING_SNAKE_CASE : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_SCREAMING_SNAKE_CASE : Optional[int] = dict(**lowercase__ , **lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = common_inputs["input_ids"].shape
_SCREAMING_SNAKE_CASE : Optional[Any] = common_inputs["decoder_input_ids"].shape[1]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self.num_attention_heads
_SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_SCREAMING_SNAKE_CASE : Optional[int] = decoder_seq_length + 3
_SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
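                # Each layer of the dummy `past_key_values` cache stores key/value
                # tensors of shape (batch, num_heads, past_seq_len, head_dim); the
                # tuples above spell those shapes out for the encoder and decoder.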
_SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowercase__ , lowercase__ )] , dim=1 )
_SCREAMING_SNAKE_CASE : str = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
_SCREAMING_SNAKE_CASE : Tuple = min(lowercase__ , lowercase__ )
_SCREAMING_SNAKE_CASE : Tuple = max(lowercase__ , lowercase__ ) - min_num_layers
_SCREAMING_SNAKE_CASE : Optional[int] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowercase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
) )
# TODO: test this.
_SCREAMING_SNAKE_CASE : Tuple = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowercase__ , lowercase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) )
return common_inputs
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = -1 , __lowerCamelCase = -1 , __lowerCamelCase = False , __lowerCamelCase = None , ) -> int:
_SCREAMING_SNAKE_CASE : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_SCREAMING_SNAKE_CASE : Optional[Any] = seqlen + 2
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_attention_heads
_SCREAMING_SNAKE_CASE : int = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_SCREAMING_SNAKE_CASE : Tuple = common_inputs["attention_mask"].dtype
_SCREAMING_SNAKE_CASE : Dict = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowercase__ , lowercase__ , dtype=lowercase__ )] , dim=1 )
_SCREAMING_SNAKE_CASE : Tuple = [
(torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ )
]
return common_inputs
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = -1 , __lowerCamelCase = -1 , __lowerCamelCase = False , __lowerCamelCase = None , ) -> List[str]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_SCREAMING_SNAKE_CASE : Optional[int] = compute_effective_axis_dimension(
lowercase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_SCREAMING_SNAKE_CASE : Any = tokenizer.num_special_tokens_to_add(lowercase__ )
_SCREAMING_SNAKE_CASE : List[Any] = compute_effective_axis_dimension(
lowercase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase__ )
# Generate dummy inputs according to compute batch and sequence
_SCREAMING_SNAKE_CASE : Any = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_SCREAMING_SNAKE_CASE : int = dict(tokenizer(lowercase__ , return_tensors=lowercase__ ) )
return common_inputs
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = -1 , __lowerCamelCase = -1 , __lowerCamelCase = False , __lowerCamelCase = None , ) -> Tuple:
if self.task in ["default", "seq2seq-lm"]:
_SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__ )
elif self.task == "causal-lm":
_SCREAMING_SNAKE_CASE : int = self._generate_dummy_inputs_for_causal_lm(
lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__ )
else:
_SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__ )
return common_inputs
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
if self.task in ["default", "seq2seq-lm"]:
_SCREAMING_SNAKE_CASE : str = super()._flatten_past_key_values_(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
_SCREAMING_SNAKE_CASE : Dict = super(lowercase__ , self )._flatten_past_key_values_(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
| 358
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ) -> Any:
_SCREAMING_SNAKE_CASE : str = parent
_SCREAMING_SNAKE_CASE : List[Any] = 1_3
_SCREAMING_SNAKE_CASE : List[str] = 7
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : int = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : int = 9_9
_SCREAMING_SNAKE_CASE : str = 3_8_4
_SCREAMING_SNAKE_CASE : List[Any] = 2
_SCREAMING_SNAKE_CASE : Dict = 4
_SCREAMING_SNAKE_CASE : Dict = 3_7
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gelu"
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : List[Any] = 5_1_2
_SCREAMING_SNAKE_CASE : Tuple = 1_6
_SCREAMING_SNAKE_CASE : Dict = 2
_SCREAMING_SNAKE_CASE : Any = 0.02
_SCREAMING_SNAKE_CASE : Any = 3
_SCREAMING_SNAKE_CASE : List[str] = 4
_SCREAMING_SNAKE_CASE : List[Any] = 1_2_8
_SCREAMING_SNAKE_CASE : Optional[int] = 2
_SCREAMING_SNAKE_CASE : int = 9
_SCREAMING_SNAKE_CASE : List[str] = 1
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE : List[Any] = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : str = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = TFConvBertForMaskedLM(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.num_labels
_SCREAMING_SNAKE_CASE : str = TFConvBertForSequenceClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices
_SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForMultipleChoice(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
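        # Multiple-choice inputs repeat each (batch, seq_len) tensor along a new
        # second axis, giving (batch, num_choices, seq_len) so every choice is
        # scored in one forward pass.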
_SCREAMING_SNAKE_CASE : List[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = self.num_labels
_SCREAMING_SNAKE_CASE : Tuple = TFConvBertForTokenClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : Optional[int] = TFConvBertForQuestionAnswering(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
        ((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) : List[Any] = config_and_inputs
_SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__snake_case = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = TFConvBertModelTester(self )
_SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
if hasattr(__lowerCamelCase , "use_cache" ):
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(model(__lowerCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = os.path.join(__lowerCamelCase , "saved_model" , "1" )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.models.load_model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : List[Any] = outputs["encoder_hidden_states"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs["encoder_attentions"]
else:
_SCREAMING_SNAKE_CASE : List[str] = outputs["hidden_states"]
_SCREAMING_SNAKE_CASE : Dict = outputs["attentions"]
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__lowerCamelCase )
    def UpperCamelCase_ ( self ) -> Dict:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , "key_length" , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , "key_length" , encoder_seq_length )
        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@require_tf
class TFConvBertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 7_6_8]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.0347_5493, -0.468_6034, -0.3063_8832],
                    [0.2263_7248, -0.2698_8646, -0.742_3424],
                    [0.1032_4868, -0.4501_3508, -0.5828_0784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
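# Why the `num_attention_heads / 2` in the shape checks above: ConvBERT (with its
# default head_ratio of 2) routes half of the attention heads through span-based
# dynamic convolution, so the returned attention maps only cover the remaining half.
# A minimal standalone sketch of the expected attention shape (an illustration, not
# library code):
def expected_convbert_attention_shape(batch_size, num_heads, seq_len, head_ratio=2):
    # (batch, heads kept by self-attention, query length, key length)
    return (batch_size, num_heads // head_ratio, seq_len, seq_len)

assert expected_convbert_attention_shape(1, 12, 6) == (1, 6, 6, 6)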
| 325
| 0
|
import datasets
from .evaluate import evaluate
UpperCamelCase__ ='\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
UpperCamelCase__ ='\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
UpperCamelCase__ ='\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute( self , predictions , references ) -> Optional[Any]:
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 359
|
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number ):
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number ):
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    def do_benchmark(number ) -> None:
        setup = "import __main__ as z"
        print(f"""Benchmark when {number = }:""" )
        print(f"""{get_set_bits_count_using_modulo_operator(number ) = }""" )
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup )
        print(f"""timeit() runs in {timing} seconds""" )
        print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }""" )
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=setup, )
        print(f"""timeit() runs in {timing} seconds""" )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
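    # Cross-check sketch: Python's bin() gives the set-bit count directly, which
    # makes a handy oracle for the two implementations above.
    for sample in (0, 1, 25, 37, 58, 2**31 - 1):
        expected = bin(sample).count("1")
        assert get_set_bits_count_using_brian_kernighans_algorithm(sample) == expected
        assert get_set_bits_count_using_modulo_operator(sample) == expected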
| 325
| 0
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=3_3 , __lowerCamelCase=3_2 , __lowerCamelCase=5 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Any = parent
_SCREAMING_SNAKE_CASE : int = batch_size
_SCREAMING_SNAKE_CASE : Tuple = seq_length
_SCREAMING_SNAKE_CASE : Optional[int] = is_training
_SCREAMING_SNAKE_CASE : Dict = use_input_mask
_SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
_SCREAMING_SNAKE_CASE : str = use_labels
_SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
_SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
_SCREAMING_SNAKE_CASE : str = intermediate_size
_SCREAMING_SNAKE_CASE : str = hidden_act
_SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
_SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
_SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
_SCREAMING_SNAKE_CASE : Any = initializer_range
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
_SCREAMING_SNAKE_CASE : Tuple = num_choices
_SCREAMING_SNAKE_CASE : Any = scope
    def prepare_config_and_inputs( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : str = None
_SCREAMING_SNAKE_CASE : str = None
_SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> List[str]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        model = EsmModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
        model = EsmForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class EsmModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_mismatched_shapes = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
    def setUp( self ) -> None:
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> str:
self.config_tester.run_common_tests()
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def UpperCamelCase_ ( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def UpperCamelCase_ ( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def UpperCamelCase_ ( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def UpperCamelCase_ ( self ) -> Any:
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def UpperCamelCase_ ( self ) -> Any:
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config )
        input_ids = torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]] )
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        position_ids = create_position_ids_from_input_ids(input_ids , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
def UpperCamelCase_ ( self ) -> Tuple:
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config )
        inputs_embeds = torch.empty(2 , 4 , 3_0 )
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions] )
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
@unittest.skip("Esm does not support embedding resizing" )
def UpperCamelCase_ ( self ) -> int:
pass
@unittest.skip("Esm does not support embedding resizing" )
def UpperCamelCase_ ( self ) -> Optional[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase_ ( self ) -> Dict:
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
'''simple docstring'''
@slow
    def UpperCamelCase_ ( self ) -> List[str]:
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            output = model(input_ids )[0]
            vocab_size = 3_3
            expected_shape = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , expected_shape )
            expected_slice = torch.tensor(
                [[[8.9215, -1_0.5_8_9_8, -6.4671], [-6.3967, -1_3.9_1_1_4, -1.1212], [-7.7812, -1_3.9_5_1_6, -3.7406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def UpperCamelCase_ ( self ) -> Dict:
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
            output = model(input_ids )[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
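# The padding-aware position ids exercised above can be computed with a
# cumulative-sum trick; this is a standalone sketch of the idea (mirroring the
# RoBERTa/ESM approach, not the library's exact code):
def make_position_ids(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()  # 1 for real tokens, 0 for padding
    incremental_indices = torch.cumsum(mask, dim=1) * mask  # padding positions stay 0
    return incremental_indices.long() + padding_idx  # shift so padding maps to padding_idx

if is_torch_available():
    assert make_position_ids(torch.tensor([[12, 31, 13, 1]]), padding_idx=1).tolist() == [[2, 3, 4, 1]]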
| 360
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
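# A minimal, self-contained sketch of the lazy-import idea used above (not the
# actual transformers._LazyModule implementation): resolve the owning submodule on
# first attribute access instead of at package import time.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item):
        module = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(module, item)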
| 325
| 0
|
"""simple docstring"""
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 361
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
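    # Quick numerical sanity check (sketch): De Morgan's law also holds for these
    # fuzzy operations, NOT(A OR B) == NOT(A) AND NOT(B).
    assert np.allclose(
        fuzz.fuzzy_not(union),
        fuzz.fuzzy_and(X, fuzz.fuzzy_not(young), X, fuzz.fuzzy_not(middle_aged))[1],
    )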
| 325
| 0
|
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[str] = {}
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1 ) -> str:
if self.graph.get(_a ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_SCREAMING_SNAKE_CASE : int = [[w, v]]
if not self.graph.get(_a ):
_SCREAMING_SNAKE_CASE : List[Any] = []
def UpperCamelCase_ ( self ) -> int:
return list(self.graph )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
if self.graph.get(_a ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_a )
def UpperCamelCase_ ( self , __lowerCamelCase=-2 , __lowerCamelCase=-1 ) -> List[str]:
if s == d:
return []
_SCREAMING_SNAKE_CASE : Optional[int] = []
_SCREAMING_SNAKE_CASE : List[Any] = []
if s == -2:
_SCREAMING_SNAKE_CASE : Optional[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_SCREAMING_SNAKE_CASE : List[Any] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_SCREAMING_SNAKE_CASE : Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_a )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_SCREAMING_SNAKE_CASE : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_a ) != 0:
_SCREAMING_SNAKE_CASE : Union[str, Any] = stack[len(_a ) - 1]
else:
_SCREAMING_SNAKE_CASE : Any = ss
# check if se have reached the starting point
if len(_a ) == 0:
return visited
def UpperCamelCase_ ( self , __lowerCamelCase=-1 ) -> Tuple:
if c == -1:
_SCREAMING_SNAKE_CASE : Optional[Any] = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(_a ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_SCREAMING_SNAKE_CASE : Optional[int] = floor(random() * c ) + 1
if n != i:
self.add_pair(_a , _a , 1 )
def UpperCamelCase_ ( self , __lowerCamelCase=-2 ) -> int:
_SCREAMING_SNAKE_CASE : List[str] = deque()
_SCREAMING_SNAKE_CASE : Dict = []
if s == -2:
_SCREAMING_SNAKE_CASE : int = list(self.graph )[0]
d.append(_a )
visited.append(_a )
while d:
_SCREAMING_SNAKE_CASE : Optional[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase_ ( self , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : List[str] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCamelCase_ ( self , __lowerCamelCase ) -> str:
return len(self.graph[u] )
def UpperCamelCase_ ( self , __lowerCamelCase=-2 ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
if s == -2:
_SCREAMING_SNAKE_CASE : int = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_SCREAMING_SNAKE_CASE : Optional[int] = s
_SCREAMING_SNAKE_CASE : Optional[int] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_SCREAMING_SNAKE_CASE : Union[str, Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_SCREAMING_SNAKE_CASE : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_a ) != 0:
_SCREAMING_SNAKE_CASE : List[Any] = stack[len(_a ) - 1]
else:
_SCREAMING_SNAKE_CASE : Any = ss
# check if se have reached the starting point
if len(_a ) == 0:
return sorted_nodes
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : List[Any] = []
_SCREAMING_SNAKE_CASE : Tuple = []
_SCREAMING_SNAKE_CASE : Optional[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_SCREAMING_SNAKE_CASE : Union[str, Any] = -2
_SCREAMING_SNAKE_CASE : List[Any] = []
_SCREAMING_SNAKE_CASE : Tuple = s
_SCREAMING_SNAKE_CASE : Dict = False
_SCREAMING_SNAKE_CASE : Any = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_SCREAMING_SNAKE_CASE : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(_a ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_SCREAMING_SNAKE_CASE : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_SCREAMING_SNAKE_CASE : Optional[Any] = True
if len(_a ) != 0:
_SCREAMING_SNAKE_CASE : Tuple = stack[len(_a ) - 1]
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = False
indirect_parents.append(_a )
_SCREAMING_SNAKE_CASE : int = s
_SCREAMING_SNAKE_CASE : Tuple = ss
# check if se have reached the starting point
if len(_a ) == 0:
return list(_a )
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : Tuple = []
_SCREAMING_SNAKE_CASE : str = []
_SCREAMING_SNAKE_CASE : Optional[int] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_SCREAMING_SNAKE_CASE : Dict = -2
_SCREAMING_SNAKE_CASE : List[str] = []
_SCREAMING_SNAKE_CASE : str = s
_SCREAMING_SNAKE_CASE : int = False
_SCREAMING_SNAKE_CASE : Tuple = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_SCREAMING_SNAKE_CASE : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_SCREAMING_SNAKE_CASE : Optional[int] = len(_a ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_SCREAMING_SNAKE_CASE : Any = True
if len(_a ) != 0:
_SCREAMING_SNAKE_CASE : Dict = stack[len(_a ) - 1]
else:
_SCREAMING_SNAKE_CASE : Optional[int] = False
indirect_parents.append(_a )
_SCREAMING_SNAKE_CASE : int = s
_SCREAMING_SNAKE_CASE : List[str] = ss
# check if se have reached the starting point
if len(_a ) == 0:
return False
def UpperCamelCase_ ( self , __lowerCamelCase=-2 , __lowerCamelCase=-1 ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = time()
self.dfs(_a , _a )
_SCREAMING_SNAKE_CASE : str = time()
return end - begin
def UpperCamelCase_ ( self , __lowerCamelCase=-2 ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : str = time()
self.bfs(_a )
_SCREAMING_SNAKE_CASE : Union[str, Any] = time()
return end - begin
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = {}
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1 ) -> Any:
if self.graph.get(_a ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_SCREAMING_SNAKE_CASE : List[str] = [[w, v]]
# add the other way
if self.graph.get(_a ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
_SCREAMING_SNAKE_CASE : int = [[w, u]]
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
if self.graph.get(_a ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_a )
# the other way round
if self.graph.get(_a ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_a )
def UpperCamelCase_ ( self , __lowerCamelCase=-2 , __lowerCamelCase=-1 ) -> int:
if s == d:
return []
_SCREAMING_SNAKE_CASE : Any = []
_SCREAMING_SNAKE_CASE : int = []
if s == -2:
_SCREAMING_SNAKE_CASE : Dict = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_SCREAMING_SNAKE_CASE : List[Any] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_SCREAMING_SNAKE_CASE : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_a )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_SCREAMING_SNAKE_CASE : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_a ) != 0:
_SCREAMING_SNAKE_CASE : Optional[int] = stack[len(_a ) - 1]
else:
_SCREAMING_SNAKE_CASE : int = ss
# check if se have reached the starting point
if len(_a ) == 0:
return visited
def UpperCamelCase_ ( self , __lowerCamelCase=-1 ) -> List[Any]:
if c == -1:
_SCREAMING_SNAKE_CASE : Dict = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(_a ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_SCREAMING_SNAKE_CASE : Any = floor(random() * c ) + 1
if n != i:
self.add_pair(_a , _a , 1 )
def UpperCamelCase_ ( self , __lowerCamelCase=-2 ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Tuple = deque()
_SCREAMING_SNAKE_CASE : Optional[int] = []
if s == -2:
_SCREAMING_SNAKE_CASE : Tuple = list(self.graph )[0]
d.append(_a )
visited.append(_a )
while d:
_SCREAMING_SNAKE_CASE : Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]:
return len(self.graph[u] )
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = []
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : List[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_SCREAMING_SNAKE_CASE : int = -2
_SCREAMING_SNAKE_CASE : List[Any] = []
_SCREAMING_SNAKE_CASE : Optional[int] = s
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : List[str] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_SCREAMING_SNAKE_CASE : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_SCREAMING_SNAKE_CASE : Any = len(_a ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_SCREAMING_SNAKE_CASE : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_SCREAMING_SNAKE_CASE : Dict = True
if len(_a ) != 0:
_SCREAMING_SNAKE_CASE : int = stack[len(_a ) - 1]
else:
_SCREAMING_SNAKE_CASE : Dict = False
indirect_parents.append(_a )
_SCREAMING_SNAKE_CASE : Union[str, Any] = s
_SCREAMING_SNAKE_CASE : Any = ss
# check if se have reached the starting point
if len(_a ) == 0:
return list(_a )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : List[Any] = []
_SCREAMING_SNAKE_CASE : Optional[int] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_SCREAMING_SNAKE_CASE : List[str] = -2
_SCREAMING_SNAKE_CASE : str = []
_SCREAMING_SNAKE_CASE : Dict = s
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : int = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_SCREAMING_SNAKE_CASE : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_SCREAMING_SNAKE_CASE : str = len(_a ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_SCREAMING_SNAKE_CASE : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_SCREAMING_SNAKE_CASE : Optional[Any] = True
if len(_a ) != 0:
_SCREAMING_SNAKE_CASE : List[Any] = stack[len(_a ) - 1]
else:
_SCREAMING_SNAKE_CASE : List[Any] = False
indirect_parents.append(_a )
_SCREAMING_SNAKE_CASE : Optional[int] = s
_SCREAMING_SNAKE_CASE : List[str] = ss
# check if se have reached the starting point
if len(_a ) == 0:
return False
def UpperCamelCase_ ( self ) -> List[str]:
return list(self.graph )
def UpperCamelCase_ ( self , __lowerCamelCase=-2 , __lowerCamelCase=-1 ) -> int:
_SCREAMING_SNAKE_CASE : List[str] = time()
self.dfs(_a , _a )
_SCREAMING_SNAKE_CASE : str = time()
return end - begin
def UpperCamelCase_ ( self , __lowerCamelCase=-2 ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = time()
self.bfs(_a )
_SCREAMING_SNAKE_CASE : Union[str, Any] = time()
return end - begin
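# A compact recursive alternative (sketch) to the iterative cycle detection above,
# using the classic white/gray/black DFS coloring; edges follow the same
# [weight, destination] adjacency format as the classes in this file.
def has_cycle_directed(graph):
    WHITE, GRAY, BLACK = 0, 1, 2
    color = dict.fromkeys(graph, WHITE)

    def visit(node):
        color[node] = GRAY
        for _w, neighbor in graph.get(node, []):
            if color.get(neighbor, WHITE) == GRAY:  # back edge found -> cycle
                return True
            if color.get(neighbor, WHITE) == WHITE and visit(neighbor):
                return True
        color[node] = BLACK
        return False

    return any(color[node] == WHITE and visit(node) for node in list(graph))

assert has_cycle_directed({1: [[1, 2]], 2: [[1, 3]], 3: [[1, 1]]}) is True
assert has_cycle_directed({1: [[1, 2]], 2: [[1, 3]], 3: []}) is False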
| 362
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline( DiffusionPipeline ):
    '''simple docstring'''
    _optional_components = ["vqvae"]
    def __init__( self , vqvae , unet , mel , scheduler , ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler , mel=mel , vqvae=vqvae )
    def get_default_steps( self ) -> int:
        return 5_0 if isinstance(self.scheduler , DDIMScheduler ) else 1_0_0_0
    @torch.no_grad()
    def __call__( self , batch_size = 1 , audio_file = None , raw_audio = None , slice = 0 , start_step = 0 , steps = None , generator = None , mask_start_secs = 0 , mask_end_secs = 0 , step_generator = None , eta = 0 , noise = None , encoding = None , return_dict=True , ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps )
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=generator , device=self.device , )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file , raw_audio )
            input_image = self.mel.audio_slice_to_image(slice )
            input_image = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
                (input_image.height, input_image.width) )
            input_image = (input_image / 2_5_5) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images , 0 ) ).latent_dist.sample(
                    generator=generator )[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images , noise , self.scheduler.timesteps[start_step - 1] )
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second )
            mask_end = int(mask_end_secs * pixels_per_second )
            mask = self.scheduler.add_noise(input_images , noise , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNet2DConditionModel ):
                model_output = self.unet(images , t , encoding )["sample"]
            else:
                model_output = self.unet(images , t )["sample"]
            if isinstance(self.scheduler , DDIMScheduler ):
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , eta=eta , generator=step_generator , )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , generator=step_generator , )["prev_sample"]
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images )["sample"]
        images = (images / 2 + 0.5).clamp(0 , 1 )
        images = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        images = (images * 2_5_5).round().astype("uint8" )
        images = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode="RGB" ).convert("L" ) for _ in images) )
        audios = [self.mel.image_to_audio(_ ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios )[:, np.newaxis, :] ) , **ImagePipelineOutput(images ) )
    @torch.no_grad()
    def encode( self , images , steps = 5_0 ) -> np.ndarray:
        assert isinstance(self.scheduler , DDIMScheduler )
        self.scheduler.set_timesteps(steps )
        sample = np.array(
            [np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
        sample = (sample / 2_5_5) * 2 - 1
        sample = torch.Tensor(sample ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample , t )["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def slerp( x0 , x1 , alpha ) -> torch.Tensor:
        theta = acos(torch.dot(torch.flatten(x0 ) , torch.flatten(x1 ) ) / torch.norm(x0 ) / torch.norm(x1 ) )
        return sin((1 - alpha) * theta ) * x0 / sin(theta ) + sin(alpha * theta ) * x1 / sin(theta )
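# Usage sketch for the spherical interpolation above: slerp keeps unit-norm inputs
# on the unit sphere, which plain linear interpolation would not (illustrative
# check, assuming equal-shaped tensors).
if __name__ == "__main__":
    x_start = torch.tensor([1.0, 0.0])
    x_end = torch.tensor([0.0, 1.0])
    midpoint = AudioDiffusionPipeline.slerp(x_start, x_end, 0.5)
    assert torch.allclose(torch.norm(midpoint), torch.tensor(1.0), atol=1e-6)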
| 325
| 0
|
UpperCamelCase__ =[
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCamelCase__ =[
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCamelCase__ =[
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCamelCase__ =[
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCamelCase__ =[
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCamelCase__ =[
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCamelCase__ =[
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCamelCase__ =[
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
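# These tables appear to be descending subsets of the 1000 training timesteps used
# to run diffusion inference in fewer denoising steps (an assumption based on their
# shape, not documented here). A sketch of the invariants such schedules satisfy:
def _check_schedule(schedule):
    assert 0 <= schedule[0] <= 999 and schedule[-1] == 0
    assert all(a > b for a, b in zip(schedule, schedule[1:]))  # strictly decreasing

_check_schedule([999, 800, 799, 600, 0])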
| 363
|
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter ):
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1 ):
        for perpendicular in range(base, max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter = 1000 ):
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
| 325
| 0
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest( TestCase ):
    '''simple docstring'''
    def _create_example_records( self ) -> int:
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dict( self ) -> str:
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data )
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )
    def UpperCamelCase_ ( self ) -> str:
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )
    def UpperCamelCase_ ( self ) -> Dict:  # checks what happens with missing columns
        example_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(example_records )
        self.assertDictEqual(dset[0] , {"col_1": 1} )
        self.assertDictEqual(dset[1] , {"col_1": None} )  # NB: first record is used for columns
    def UpperCamelCase_ ( self ) -> Optional[int]:  # checks if the type can be inferred from the second record
        example_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(example_records )
        self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
    def UpperCamelCase_ ( self ) -> int:
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
        self.assertListEqual(dset.column_names , [] )
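# Equivalence sketch: from_list takes row-oriented records while from_dict takes
# column-oriented data; for matching inputs they build the same table.
if __name__ == "__main__":
    rows = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
    columns = {"col_1": [3, 2], "col_2": ["a", "b"]}
    assert Dataset.from_list(rows).to_dict() == Dataset.from_dict(columns).to_dict()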
| 364
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock ):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f )
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
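# The termination trick above in miniature (sketch): an iterator-backed side_effect
# makes the mocked read() yield one chunk and then a falsy value, so a
# "while data: conn.send(data)" style loop exits cleanly.
if __name__ == "__main__":
    chunks = iter([b"data", None])
    reader = Mock(side_effect=lambda _=None: next(chunks))
    received = []
    while (data := reader(1024)):
        received.append(data)
    assert received == [b"data"]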
| 325
| 0
|
def naive_cut_rod_recursive(n, prices ):
    _enforce_args(n, prices )
    if n == 0:
        return 0
    max_revue = float("-inf" )
    for i in range(1, n + 1 ):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices ) )
    return max_revue
def top_down_cut_rod(n, prices ):
    _enforce_args(n, prices )
    max_rev = [float("-inf" ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n, prices, max_rev )
def _top_down_cut_rod_recursive(n, prices, max_rev ):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf" )
        for i in range(1, n + 1 ):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev ), )
        max_rev[n] = max_revenue
        return max_rev[n]
def bottom_up_cut_rod(n, prices ):
    _enforce_args(n, prices )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf" ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1, n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1 ):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args(n, prices ):
    if n < 0:
        msg = f"""n must be greater than or equal to 0. Got n = {n}"""
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            """Each integral piece of rod must have a corresponding price. """
            f"""Got n = {n} but length of prices = {len(prices )}"""
        )
        raise ValueError(msg )
def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices )
    max_rev_bottom_up = bottom_up_cut_rod(n, prices )
    max_rev_naive = naive_cut_rod_recursive(n, prices )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
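    # An equivalent memoized variant (sketch) using functools.lru_cache in place of
    # the explicit max_rev table:
    from functools import lru_cache

    def cut_rod_cached(n, prices):
        @lru_cache(maxsize=None)
        def best(m):
            if m == 0:
                return 0
            return max(prices[i - 1] + best(m - i) for i in range(1, m + 1))

        return best(n)

    assert cut_rod_cached(6, (6, 10, 12, 15, 20, 23)) == 36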
| 365
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self , image_processor , tokenizer , qformer_tokenizer ) -> None:
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text." )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids" )
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask" )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding
    def batch_decode( self , *args , **kwargs ) -> Union[str, Any]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> str:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ) -> str:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained( self , save_directory , **kwargs ) -> Any:
        if os.path.isfile(save_directory ):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , "qformer_tokenizer" )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> Optional[Any]:
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder="qformer_tokenizer" )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
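# Hypothetical usage sketch (checkpoint name and outputs shown for illustration,
# not verified here): the processor tokenizes the prompt twice -- once for the
# language model, once for the Q-Former -- and preprocesses the image in one call.
#
#   from transformers import InstructBlipProcessor
#   from PIL import Image
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=Image.open("photo.jpg"), text="Describe the image.", return_tensors="pt")
#   # inputs -> input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values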
| 325
| 0
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
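# Resolution sketch for the helper above: it walks the mapping, imports the owning
# module and returns the class (or a main-init dummy when the backend dependency is
# missing), e.g.
#   image_processor_class_from_name("CLIPImageProcessor")  # -> transformers.CLIPImageProcessor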
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
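# Usage sketch (checkpoint name is illustrative; requires network or local cache):
#   config = get_image_processor_config("google/vit-base-patch16-224")
#   config.get("image_processor_type")  # older checkpoints may only carry `feature_extractor_type`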
class AutoImageProcessor:
'''simple docstring'''
def __init__( self ) -> List[str]:
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
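# Typical use of the class above (checkpoint name is illustrative):
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# Custom processors can be made discoverable through the same mapping:
#   AutoImageProcessor.register(MyConfig, MyImageProcessor)  # hypothetical classes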
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Returns ``number + 2`` if ``(number, number + 2)`` is a twin-prime pair,
    and -1 otherwise.

    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class XLMProphetNetConfig(PretrainedConfig):
'''simple docstring'''
__snake_case = """xlm-prophetnet"""
__snake_case = ["""past_key_values"""]
__snake_case = {
"""num_attention_heads""": """num_encoder_attention_heads""",
}
    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
'''simple docstring'''
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
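# Invocation sketch through the transformers CLI (model name is illustrative):
#   transformers-cli download --cache-dir ./models bert-base-uncased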
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
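# Shape sketch for the function above, per dataset with n_batch examples:
#   input_ids    (n_batch, 2, input_len)  token ids for both continuations
#   mc_token_ids (n_batch, 2)             position of the classification token
#   lm_labels    (n_batch, 2, input_len)  copy of the tokens, -100 on padding (ignored by the LM loss)
#   mc_labels    (n_batch,)               index (0 or 1) of the correct continuation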
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
    # Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info("Encoding dataset..." )
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
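# Invocation sketch (script and dataset file names follow the original ROCStories
# release and are placeholders here):
#   python run_openai_gpt.py --do_train --do_eval --output_dir ./out \
#       --train_dataset cloze_test_val__spring2016.csv \
#       --eval_dataset cloze_test_test__spring2016.csv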
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
'''simple docstring'''
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
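# Usage sketch for the helper above: only `config`, `input_ids` and `decoder_input_ids`
# are required; every mask/head-mask is synthesized when omitted:
#   inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)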
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
'''simple docstring'''
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"
    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    @slow
    def test_90_generation_from_short_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
'''simple docstring'''
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # i is prime, so mark its multiples (starting at i**2, stepping by i) composite
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Returns the number of hybrid-integers p**q * q**p (p, q prime, p < q) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(f"{solution() = }")
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCamelCase__ =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions is true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'If a class of 40 students must be arranged into groups of',
f"4 for group projects, there are {combinations(40, 4)} ways",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f"are {combinations(10, 3)} ways that first, second and",
'third place can be awarded.',
)
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
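# Import sketch for the module above (each branch needs its backend installed):
#   from diffusers.models import UNet2DConditionModel      # torch
#   from diffusers.models import FlaxUNet2DConditionModel  # flax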
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Any = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.scale_bias(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = torch.chunk(__lowerCamelCase , 2 , -1 )
_SCREAMING_SNAKE_CASE : Optional[int] = x * (1 + scale) + shift
return x
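
# Added illustration (not part of the original file; shapes are assumptions for
# the sketch): FiLM maps a conditioning embedding of width d_model * 4 to a
# per-channel (scale, shift) pair that modulates a d_model-wide sequence.
#
#     film = T5FiLMLayer(in_features=32, out_features=8)
#     x = torch.randn(2, 5, 8)        # (batch, seq_len, d_model)
#     cond = torch.randn(2, 1, 32)    # broadcasts over the sequence dimension
#     assert film(x, cond).shape == x.shape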
| 325
| 0
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
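
# Added usage sketch (not part of the original module; the relative imports mean
# this file is not run as a script). A list holding only a time limit gains a
# MaxLengthCriteria when validated against a generation call's max_length:
#
#     criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=5.0)])
#     criteria = validate_stopping_criteria(criteria, max_length=50)
#     assert criteria.max_length == 50
#     # 50 generated tokens reach the limit immediately:
#     assert criteria(torch.zeros((1, 50), dtype=torch.long), None)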
| 350
|
def binomial_coefficient(n: int, r: int) -> int:
    # Pascal's-triangle row update: O(n * r) time, O(r) space.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
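
# Added sanity check (not in the original script): C(10, 5) is 252, matching
# math.comb(10, 5) from the standard library.
assert binomial_coefficient(n=10, r=5) == 252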
| 325
| 0
|
from __future__ import annotations
import requests
valid_terms = set(
    'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch the newest `limit` posts of `subreddit`, keeping only `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }

    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 351
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        """
        Retrieve `n_docs` documents for the given query vectors. In the distributed
        setting the queries are gathered on the main worker, retrieved there, and
        the results scattered back to every worker.
        """
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 325
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 352
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
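
# Added illustration (not part of the module): a config is a plain container,
# so one field can be overridden while the rest keep the defaults above.
#
#     config = TimesformerConfig(num_frames=16)
#     assert config.hidden_size == 768 and config.attention_type == "divided_space_time"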
| 325
| 0
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each direction aims at the other's frontier node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 353
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                # re-split pieces that end in a digit followed by a comma so that
                # numbers detokenize cleanly
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
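
# Added usage sketch (my addition, not in the original file; the checkpoint name
# is only an example and requires network access to the Hub):
#
#     tok = AlbertTokenizer.from_pretrained("albert-base-v2")
#     ids = tok("A painting of a squirrel")["input_ids"]
#     assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id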
| 325
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """
    Output class for ShapEImg2ImgPipeline.

    Args:
        images: a list of images for 3D rendering.
    """

    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 354
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 325
| 0
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return all k-element combinations of the numbers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 355
|
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'

_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'

_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # fall back to the pseudo-inverse when the covariance matrix is singular
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
| 325
| 0
|
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
def lowerCamelCase__ (__lowerCamelCase=None, __lowerCamelCase=None ):
return field(default_factory=lambda: default, metadata=__UpperCAmelCase )
@dataclass
class lowerCAmelCase__:
'''simple docstring'''
__snake_case = list_field(
default=[] , metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
} , )
__snake_case = list_field(
default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
__snake_case = list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )
__snake_case = field(
default=_lowerCamelCase , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
__snake_case = field(
default=_lowerCamelCase , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
__snake_case = field(
default=_lowerCamelCase , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
__snake_case = field(default=_lowerCamelCase , metadata={'help': 'Use FP16 to accelerate inference.'} )
__snake_case = field(default=_lowerCamelCase , metadata={'help': 'Benchmark training of model'} )
__snake_case = field(default=_lowerCamelCase , metadata={'help': 'Verbose memory tracing'} )
__snake_case = field(
default=_lowerCamelCase , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
__snake_case = field(
default=_lowerCamelCase , metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
} , )
__snake_case = field(default=_lowerCamelCase , metadata={'help': 'Trace memory line by line'} )
__snake_case = field(default=_lowerCamelCase , metadata={'help': 'Save result to a CSV file'} )
__snake_case = field(default=_lowerCamelCase , metadata={'help': 'Save all print statements in a log file'} )
__snake_case = field(default=_lowerCamelCase , metadata={'help': 'Whether to print environment information'} )
__snake_case = field(
default=_lowerCamelCase , metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
} , )
__snake_case = field(
default=F"""inference_time_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
__snake_case = field(
default=F"""inference_memory_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
__snake_case = field(
default=F"""train_time_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
__snake_case = field(
default=F"""train_memory_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
__snake_case = field(
default=F"""env_info_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving environment information.'} , )
__snake_case = field(
default=F"""log_{round(time() )}.csv""" , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
__snake_case = field(default=3 , metadata={'help': 'Times an experiment will be run.'} )
__snake_case = field(
default=_lowerCamelCase , metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
} , )
def UpperCamelCase_ ( self ) -> str:
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
" are deprecated in general and it is advised to use external Benchmarking libraries "
" to benchmark Transformer models." , lowercase_ , )
def UpperCamelCase_ ( self ) -> Dict:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCamelCase_ ( self ) -> Any:
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = [\'bert-base-cased\']." )
return self.models
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True
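
# Added illustration (my addition; exact boolean defaults above are restored
# from memory of the upstream file). The dataclass doubles as a CLI schema via
# HfArgumentParser, but it can also be built directly, which triggers the
# deprecation warning in __post_init__:
#
#     args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[8])
#     print(args.to_json_string())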
| 356
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player at this node of the game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    # leaves of the tree hold the final scores
    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 325
| 0
|
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix is Hermitian (equal to its own conjugate transpose)."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix `a`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
| 357
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase__ ='src/diffusers'
UpperCamelCase__ ='.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase__ =importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase__ =spec.loader.load_module()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
return line.startswith(__lowerCamelCase ) or len(__lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", __lowerCamelCase ) is not None
def find_code_in_diffusers(object_name):
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
UpperCamelCase__ =re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCamelCase__ =re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCamelCase__ =re.compile(R'<FILL\s+[^>]*>')
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ =parser.parse_args()
check_copies(args.fix_and_overwrite)
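    # Illustrative regex demo (an assumption based on the patterns above, not
    # taken from the diffusers sources): `_re_copy_warning` captures the
    # leading indent, the dotted object name (without the `diffusers.` prefix),
    # and the optional replace pattern.
    example = "# Copied from diffusers.models.unet_2d.UNet2DModel with UNet2D->MyUNet"
    match = _re_copy_warning.search(example)
    assert match is not None and match.groups()[1] == "models.unet_2d.UNet2DModel"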
| 325
| 0
|
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
UpperCamelCase__ =[
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
UpperCamelCase__ =[
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
UpperCamelCase__ =(
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
UpperCamelCase__ =(
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
UpperCamelCase__ =[
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase__ =parser.parse_args()
UpperCamelCase__ ={}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
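    # Example invocation (illustrative; the script name and paths are
    # placeholders, not taken from the original):
    #   python convert_bigbird_pegasus_tf_to_pytorch.py \
    #       --tf_ckpt_path /path/to/bigbird_pegasus_ckpt \
    #       --save_dir ./bigbird-pegasus-hf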
| 358
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ) -> Any:
_SCREAMING_SNAKE_CASE : str = parent
_SCREAMING_SNAKE_CASE : List[Any] = 1_3
_SCREAMING_SNAKE_CASE : List[str] = 7
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : int = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : int = 9_9
_SCREAMING_SNAKE_CASE : str = 3_8_4
_SCREAMING_SNAKE_CASE : List[Any] = 2
_SCREAMING_SNAKE_CASE : Dict = 4
_SCREAMING_SNAKE_CASE : Dict = 3_7
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gelu"
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : List[Any] = 5_1_2
_SCREAMING_SNAKE_CASE : Tuple = 1_6
_SCREAMING_SNAKE_CASE : Dict = 2
_SCREAMING_SNAKE_CASE : Any = 0.02
_SCREAMING_SNAKE_CASE : Any = 3
_SCREAMING_SNAKE_CASE : List[str] = 4
_SCREAMING_SNAKE_CASE : List[Any] = 1_2_8
_SCREAMING_SNAKE_CASE : Optional[int] = 2
_SCREAMING_SNAKE_CASE : int = 9
_SCREAMING_SNAKE_CASE : List[str] = 1
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE : List[Any] = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : str = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = TFConvBertForMaskedLM(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.num_labels
_SCREAMING_SNAKE_CASE : str = TFConvBertForSequenceClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices
_SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForMultipleChoice(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = self.num_labels
_SCREAMING_SNAKE_CASE : Tuple = TFConvBertForTokenClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : Optional[int] = TFConvBertForQuestionAnswering(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def UpperCamelCase_ ( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__snake_case = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = TFConvBertModelTester(self )
_SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
if hasattr(__lowerCamelCase , "use_cache" ):
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(model(__lowerCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = os.path.join(__lowerCamelCase , "saved_model" , "1" )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.models.load_model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : List[Any] = outputs["encoder_hidden_states"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs["encoder_attentions"]
else:
_SCREAMING_SNAKE_CASE : List[str] = outputs["hidden_states"]
_SCREAMING_SNAKE_CASE : Dict = outputs["attentions"]
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , "key_length" , __lowerCamelCase )
def check_decoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(__lowerCamelCase )
self.assertEqual(out_len % 2 , 0 )
_SCREAMING_SNAKE_CASE : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Any = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : Tuple = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Optional[int] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
_SCREAMING_SNAKE_CASE : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 )
| 325
| 0
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
UpperCamelCase__ =parser.parse_args()
main(args)
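    # Example invocation (illustrative; values are placeholders, not taken
    # from the original script):
    #   python bertarize.py \
    #       --pruning_method sigmoied_threshold \
    #       --threshold 0.1 \
    #       --model_name_or_path ./serialization_dir/fine_pruned_model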
| 359
|
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
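    # Agreement check (illustrative addition): both counters should match
    # bin(n).count("1") for a handful of values.
    for n in (0, 1, 25, 37, 58, 2**20 - 1):
        assert get_set_bits_count_using_brian_kernighans_algorithm(n) == bin(n).count("1")
        assert get_set_bits_count_using_modulo_operator(n) == bin(n).count("1")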
| 325
| 0
|
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    return [list(x) for x in zip(*matrix)]


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    return matrix[::-1]


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    return [x[::-1] for x in matrix]


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
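    # Round-trip check (illustrative addition): rotating by 90° four times
    # returns the original matrix, since each helper builds a new list.
    m = make_matrix(3)
    assert rotate_90(rotate_90(rotate_90(rotate_90(m)))) == m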
| 360
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_swiftformer': [
        'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SwiftFormerConfig',
        'SwiftFormerOnnxConfig',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_swiftformer'] = [
        'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SwiftFormerForImageClassification',
        'SwiftFormerModel',
        'SwiftFormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 325
| 0
|
"""simple docstring"""
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
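    # Usage sketch (illustrative addition): divide-and-conquer max over the
    # whole list.
    nums = [2, 8, 1, 10, 3]
    assert find_max(nums, 0, len(nums) - 1) == 10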
| 361
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
UpperCamelCase__ =np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCamelCase__ =[0, 25, 50]
UpperCamelCase__ =[25, 50, 75]
UpperCamelCase__ =fuzz.membership.trimf(X, abca)
UpperCamelCase__ =fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCamelCase__ =np.ones(75)
UpperCamelCase__ =np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCamelCase__ =fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCamelCase__ =fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
UpperCamelCase__ =fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCamelCase__ =fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
UpperCamelCase__ =young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCamelCase__ =young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
UpperCamelCase__ =fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
UpperCamelCase__ =fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
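    # Spot check (illustrative addition; assumes fuzzy_or/fuzzy_and/fuzzy_not
    # act elementwise on a shared universe): De Morgan's law,
    # NOT(A OR B) == NOT(A) AND NOT(B).
    assert np.allclose(
        fuzz.fuzzy_not(union),
        fuzz.fuzzy_and(X, fuzz.fuzzy_not(young), X, fuzz.fuzzy_not(middle_aged))[1],
    )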
| 325
| 0
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class lowerCAmelCase__:
'''simple docstring'''
__snake_case = BlenderbotConfig
__snake_case = {}
__snake_case = """gelu"""
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=2_0 , __lowerCamelCase=2 , __lowerCamelCase=1 , __lowerCamelCase=0 , ) -> List[str]:
_SCREAMING_SNAKE_CASE : str = parent
_SCREAMING_SNAKE_CASE : Optional[int] = batch_size
_SCREAMING_SNAKE_CASE : int = seq_length
_SCREAMING_SNAKE_CASE : Optional[Any] = is_training
_SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
_SCREAMING_SNAKE_CASE : int = vocab_size
_SCREAMING_SNAKE_CASE : Tuple = hidden_size
_SCREAMING_SNAKE_CASE : str = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
_SCREAMING_SNAKE_CASE : Dict = intermediate_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Tuple = eos_token_id
_SCREAMING_SNAKE_CASE : List[Any] = pad_token_id
_SCREAMING_SNAKE_CASE : Tuple = bos_token_id
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Dict = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_SCREAMING_SNAKE_CASE : Optional[Any] = prepare_blenderbot_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, inputs_dict
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Tuple = TFBlenderbotModel(config=snake_case__ ).get_decoder()
_SCREAMING_SNAKE_CASE : int = inputs_dict['''input_ids''']
_SCREAMING_SNAKE_CASE : Optional[Any] = input_ids[:1, :]
_SCREAMING_SNAKE_CASE : List[str] = inputs_dict['''attention_mask'''][:1, :]
_SCREAMING_SNAKE_CASE : Dict = inputs_dict['''head_mask''']
_SCREAMING_SNAKE_CASE : Optional[Any] = 1
# first forward pass
_SCREAMING_SNAKE_CASE : List[Any] = model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
_SCREAMING_SNAKE_CASE : int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_SCREAMING_SNAKE_CASE : Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1 )
_SCREAMING_SNAKE_CASE : Any = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_SCREAMING_SNAKE_CASE : int = model(snake_case__ , attention_mask=snake_case__ )[0]
_SCREAMING_SNAKE_CASE : Optional[int] = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_SCREAMING_SNAKE_CASE : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_SCREAMING_SNAKE_CASE : Tuple = output_from_no_past[:, -3:, random_slice_idx]
_SCREAMING_SNAKE_CASE : int = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1E-3 )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class lowerCAmelCase__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__snake_case = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__snake_case = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[str] = TFBlenderbotModelTester(self )
_SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=snake_case__ )
def UpperCamelCase_ ( self ) -> str:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
@require_tokenizers
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
__snake_case = ["""My friends are cool but they eat too many carbs."""]
__snake_case = """facebook/blenderbot-400M-distill"""
@cached_property
def UpperCamelCase_ ( self ) -> int:
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(self.src_text , return_tensors="tf" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.model.generate(
model_inputs.input_ids , )
_SCREAMING_SNAKE_CASE : str = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 362
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['vqvae']
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]:
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase , mel=__lowerCamelCase , vqvae=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
return 5_0 if isinstance(self.scheduler , __lowerCamelCase ) else 1_0_0_0
@torch.no_grad()
def __call__( self , __lowerCamelCase = 1 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
_SCREAMING_SNAKE_CASE : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_SCREAMING_SNAKE_CASE : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__lowerCamelCase , device=self.device , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = noise
_SCREAMING_SNAKE_CASE : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.mel.audio_slice_to_image(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
_SCREAMING_SNAKE_CASE : Optional[int] = (input_image / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(__lowerCamelCase , 0 ) ).latent_dist.sample(
generator=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , self.scheduler.timesteps[start_step - 1] )
_SCREAMING_SNAKE_CASE : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_SCREAMING_SNAKE_CASE : Optional[Any] = int(mask_start_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[int] = int(mask_end_secs * pixels_per_second )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.add_noise(__lowerCamelCase , __lowerCamelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = self.unet(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["sample"]
else:
_SCREAMING_SNAKE_CASE : str = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
if isinstance(self.scheduler , __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , eta=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
else:
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step(
model_output=__lowerCamelCase , timestep=__lowerCamelCase , sample=__lowerCamelCase , generator=__lowerCamelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
_SCREAMING_SNAKE_CASE : str = mask[:, step, :, :mask_start]
if mask_end > 0:
_SCREAMING_SNAKE_CASE : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_SCREAMING_SNAKE_CASE : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images
_SCREAMING_SNAKE_CASE : Dict = self.vqvae.decode(__lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_SCREAMING_SNAKE_CASE : List[str] = (images * 2_5_5).round().astype("uint8" )
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )
        audios = [self.mel.image_to_audio(_) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__lowerCamelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowerCamelCase ) )
@torch.no_grad()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 5_0 ) -> np.ndarray:
assert isinstance(self.scheduler , __lowerCamelCase )
self.scheduler.set_timesteps(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (sample / 2_5_5) * 2 - 1
_SCREAMING_SNAKE_CASE : Any = torch.Tensor(__lowerCamelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_SCREAMING_SNAKE_CASE : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.alphas_cumprod[t]
_SCREAMING_SNAKE_CASE : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t
_SCREAMING_SNAKE_CASE : Optional[int] = self.unet(__lowerCamelCase , __lowerCamelCase )["sample"]
_SCREAMING_SNAKE_CASE : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_SCREAMING_SNAKE_CASE : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_SCREAMING_SNAKE_CASE : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp(x0, x1, alpha) -> torch.Tensor:
        # Spherical linear interpolation between two flattened tensors.
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
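# Usage sketch (illustrative; the checkpoint id and exact call pattern are
# assumptions, not taken from this file): the pipeline yields generated
# spectrogram images together with the decoded audio.
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#   output = pipe(batch_size=1)
#   image = output.images[0]
#   audio = output.audios[0]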
| 325
| 0
|
from typing import List
import numpy as np
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : str = {key: len(_lowercase ) for key, value in gen_kwargs.items() if isinstance(_lowercase, _lowercase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
_SCREAMING_SNAKE_CASE : Optional[Any] = max(lists_lengths.values(), default=0 )
    return max(1, max_length )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = []
for group_idx in range(_lowercase ):
_SCREAMING_SNAKE_CASE : Any = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_SCREAMING_SNAKE_CASE : int = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_SCREAMING_SNAKE_CASE : str = range(_lowercase, start + num_shards_to_add )
shards_indices_per_group.append(_lowercase )
return shards_indices_per_group
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = _number_of_shards_in_gen_kwargs(_lowercase )
if num_shards == 1:
return [dict(_lowercase )]
else:
_SCREAMING_SNAKE_CASE : Dict = _distribute_shards(num_shards=_lowercase, max_num_jobs=_lowercase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
            if isinstance(value, list )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(_lowercase ) )
]
def lowerCamelCase__ (__lowerCamelCase ):
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
    _SCREAMING_SNAKE_CASE : Tuple = {len(value ) for value in gen_kwargs.values() if isinstance(value, list )}
    _SCREAMING_SNAKE_CASE : List[str] = {}
    for size in list_sizes:
        _SCREAMING_SNAKE_CASE : List[Any] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    _SCREAMING_SNAKE_CASE : int = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list ):
            _SCREAMING_SNAKE_CASE : List[str] = [value[i] for i in indices_per_size[len(value )]]
return shuffled_kwargs
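# Illustrative sketch (not part of the original file): the shard-distribution helper
# above with descriptive names. Each of the first (num_shards % max_num_jobs) groups
# receives one extra shard, so group sizes differ by at most one.
def distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    shard_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shard_indices_per_group[-1].stop if shard_indices_per_group else 0
        shard_indices_per_group.append(range(start, start + num_shards_to_add))
    return shard_indices_per_group


assert distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]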
| 363
|
from __future__ import annotations
import typing
from collections import Counter
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : typing.Counter[int] = Counter()
for base in range(1, max_perimeter + 1 ):
        for perpendicular in range(base, max_perimeter + 1 ):
_SCREAMING_SNAKE_CASE : List[Any] = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def lowerCamelCase__ (__lowerCamelCase = 1000 ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = pythagorean_triple(__lowerCamelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
| 325
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class lowerCAmelCase__( PretrainedConfig ):
'''simple docstring'''
__snake_case = "mra"
def __init__( self , __lowerCamelCase=5_0_2_6_5 , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1 , __lowerCamelCase=0.02 , __lowerCamelCase=1E-5 , __lowerCamelCase="absolute" , __lowerCamelCase=4 , __lowerCamelCase="full" , __lowerCamelCase=0 , __lowerCamelCase=0 , __lowerCamelCase=1 , __lowerCamelCase=0 , __lowerCamelCase=2 , **__lowerCamelCase , ) -> Optional[Any]:
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
_SCREAMING_SNAKE_CASE : str = max_position_embeddings
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : str = intermediate_size
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
_SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Dict = initializer_range
_SCREAMING_SNAKE_CASE : Dict = type_vocab_size
_SCREAMING_SNAKE_CASE : Any = layer_norm_eps
_SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
_SCREAMING_SNAKE_CASE : Any = block_per_row
_SCREAMING_SNAKE_CASE : Optional[Any] = approx_mode
_SCREAMING_SNAKE_CASE : Optional[Any] = initial_prior_first_n_blocks
_SCREAMING_SNAKE_CASE : int = initial_prior_diagonal_n_blocks
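# Illustrative sketch (not part of the original file): constructing the configuration
# above through the public transformers API, where it is exported as MraConfig.
# Requires a transformers release that includes the MRA model.
from transformers import MraConfig

config = MraConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
assert config.model_type == "mra"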
| 364
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
# ===== initialization =====
_SCREAMING_SNAKE_CASE : List[Any] = Mock()
_SCREAMING_SNAKE_CASE : Optional[Any] = conn, Mock()
_SCREAMING_SNAKE_CASE : Dict = iter([1, None] )
_SCREAMING_SNAKE_CASE : Optional[Any] = lambda __lowerCamelCase : next(__lowerCamelCase )
# ===== invoke =====
    send_file(filename="mytext.txt", testing=True )
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
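# Illustrative sketch (not part of the original file): why the test above receives
# (file, sock) while the decorators read (socket, open) top-down -- with stacked
# unittest.mock.patch decorators, the bottom-most patch maps to the first argument.
from unittest.mock import patch


@patch("os.getcwd")   # applied last  -> second positional argument
@patch("os.listdir")  # applied first -> first positional argument
def demo(mock_listdir, mock_getcwd):
    mock_listdir.return_value = ["a.txt"]
    mock_getcwd.return_value = "/tmp"
    return mock_listdir("."), mock_getcwd()


assert demo() == (["a.txt"], "/tmp")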
| 325
| 0
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ =OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
UpperCamelCase__ =_LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowerCamelCase__ (__lowerCamelCase ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_SCREAMING_SNAKE_CASE : Optional[int] = model_type_to_module_name(snake_case__ )
_SCREAMING_SNAKE_CASE : Any = importlib.import_module(f""".{module_name}""", "transformers.models" )
try:
return getattr(snake_case__, snake_case__ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(snake_case__, "__name__", snake_case__ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_SCREAMING_SNAKE_CASE : Optional[int] = importlib.import_module("transformers" )
if hasattr(snake_case__, snake_case__ ):
return getattr(snake_case__, snake_case__ )
return None
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase = None, __lowerCamelCase = False, __lowerCamelCase = False, __lowerCamelCase = None, __lowerCamelCase = None, __lowerCamelCase = None, __lowerCamelCase = False, **__lowerCamelCase, ):
_SCREAMING_SNAKE_CASE : Optional[Any] = get_file_from_repo(
snake_case__, snake_case__, cache_dir=snake_case__, force_download=snake_case__, resume_download=snake_case__, proxies=snake_case__, use_auth_token=snake_case__, revision=snake_case__, local_files_only=snake_case__, )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(snake_case__, encoding="utf-8" ) as reader:
return json.load(snake_case__ )
class lowerCAmelCase__:
def __init__( self ) -> List[str]:
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(lowerCAmelCase_ )
def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop("config" , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("trust_remote_code" , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : str = True
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = ImageProcessingMixin.get_image_processor_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Optional[int] = config_dict.get("image_processor_type" , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_SCREAMING_SNAKE_CASE : List[str] = config_dict.pop("feature_extractor_type" , lowerCAmelCase_ )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
_SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
_SCREAMING_SNAKE_CASE : Any = config_dict["auto_map"]["AutoFeatureExtractor"]
_SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
            # It could be in `config.image_processor_type`
_SCREAMING_SNAKE_CASE : str = getattr(lowerCAmelCase_ , "image_processor_type" , lowerCAmelCase_ )
if hasattr(lowerCAmelCase_ , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
_SCREAMING_SNAKE_CASE : int = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
_SCREAMING_SNAKE_CASE : List[str] = image_processor_class_from_name(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Optional[int] = image_processor_auto_map is not None
_SCREAMING_SNAKE_CASE : Any = image_processor_class is not None or type(lowerCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING
_SCREAMING_SNAKE_CASE : Union[str, Any] = resolve_trust_remote_code(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if has_remote_code and trust_remote_code:
_SCREAMING_SNAKE_CASE : str = get_class_from_dynamic_module(
lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop("code_revision" , lowerCAmelCase_ )
if os.path.isdir(lowerCAmelCase_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(lowerCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING:
_SCREAMING_SNAKE_CASE : Any = IMAGE_PROCESSOR_MAPPING[type(lowerCAmelCase_ )]
return image_processor_class.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def UpperCamelCase_ ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
IMAGE_PROCESSOR_MAPPING.register(lowerCAmelCase_ , lowerCAmelCase_ )
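# Illustrative sketch (not part of the original file): typical use of the Auto class
# defined above. The checkpoint name is an example; from_pretrained downloads its
# preprocessor config from the Hub, so a network connection is required.
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
print(type(image_processor).__name__)  # resolves to ViTImageProcessor via the mapping above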
| 365
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['image_processor', 'tokenizer']
__snake_case = 'BlipImageProcessor'
__snake_case = 'AutoTokenizer'
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
_SCREAMING_SNAKE_CASE : List[str] = qformer_tokenizer
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = True , __lowerCamelCase = False , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = True , __lowerCamelCase = None , **__lowerCamelCase , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
_SCREAMING_SNAKE_CASE : Any = BatchFeature()
if text is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : str = qformer_text_encoding.pop("input_ids" )
_SCREAMING_SNAKE_CASE : List[Any] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> str:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCamelCase_ ( self , __lowerCamelCase , **__lowerCamelCase ) -> Any:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
_SCREAMING_SNAKE_CASE : Optional[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
return cls(*__lowerCamelCase )
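# Illustrative sketch (not part of the original file): the save/load round trip that
# the two methods above implement, using the upstream export name InstructBlipProcessor.
# The checkpoint name is an example and is downloaded from the Hub.
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
processor.save_pretrained("./instructblip")  # also writes a qformer_tokenizer/ subfolder
reloaded = InstructBlipProcessor.from_pretrained("./instructblip")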
| 325
| 0
|
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : str = len(lowerCAmelCase__ )
_SCREAMING_SNAKE_CASE : Any = sum(lowerCAmelCase__ )
_SCREAMING_SNAKE_CASE : Tuple = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(n + 1 ):
_SCREAMING_SNAKE_CASE : str = True
for i in range(1, s + 1 ):
_SCREAMING_SNAKE_CASE : Optional[int] = False
for i in range(1, n + 1 ):
for j in range(1, s + 1 ):
            _SCREAMING_SNAKE_CASE : Optional[Any] = dp[i - 1][j]
if arr[i - 1] <= j:
_SCREAMING_SNAKE_CASE : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ), -1, -1 ):
if dp[n][j] is True:
_SCREAMING_SNAKE_CASE : List[str] = s - 2 * j
break
return diff
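# Illustrative sketch (not part of the original file): the same minimum-partition
# dynamic program with descriptive names; dp[i][j] is True when some subset of the
# first i values sums to exactly j, and the answer is s - 2j for the largest
# reachable j <= s // 2.
def find_min_diff(arr: list) -> int:
    n, s = len(arr), sum(arr)
    dp = [[False] * (s + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            return s - 2 * j
    return s


assert find_min_diff([1, 6, 11, 5]) == 1  # {1, 5, 6} vs {11}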
| 366
|
from maths.prime_check import is_prime
def lowerCamelCase__ (__lowerCamelCase ):
if not isinstance(__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__lowerCamelCase )
if is_prime(__lowerCamelCase ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
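# Illustrative sketch (not part of the original file): the twin-prime helper above with
# a descriptive name and quick checks, using a local primality test so the snippet is
# self-contained.
def is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))


def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin-prime pair, else -1."""
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1


assert twin_prime(3) == 5
assert twin_prime(4) == -1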
| 325
| 0
|
def lowerCamelCase__ (__lowerCamelCase ):
    _SCREAMING_SNAKE_CASE : int = [0] * len(graph )
    _SCREAMING_SNAKE_CASE : str = []
    _SCREAMING_SNAKE_CASE : Optional[int] = []
    _SCREAMING_SNAKE_CASE : int = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        _SCREAMING_SNAKE_CASE : Union[str, Any] = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print("Cycle exists" )
    else:
        print(topo )
# Adjacency List of Graph
UpperCamelCase__ ={0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
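# Illustrative sketch (not part of the original file): the same Kahn's-algorithm
# topological sort with descriptive names and an O(1) queue pop via deque; returns
# None instead of printing when a cycle is detected.
from collections import deque


def kahn_topological_sort(graph: dict) -> list:
    indegree = {v: 0 for v in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    queue = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while queue:
        vertex = queue.popleft()
        order.append(vertex)
        for nxt in graph[vertex]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    return order if len(order) == len(graph) else None


assert kahn_topological_sort({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}) == [0, 1, 2, 3, 4, 5]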
| 367
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase__ (__lowerCamelCase ):
return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code )
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=__lowerCamelCase , default=__lowerCamelCase , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=__lowerCamelCase , help="Name of the model to download" )
download_parser.set_defaults(func=__lowerCamelCase )
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Any = model
_SCREAMING_SNAKE_CASE : Optional[int] = cache
_SCREAMING_SNAKE_CASE : str = force
_SCREAMING_SNAKE_CASE : str = trust_remote_code
def UpperCamelCase_ ( self ) -> Optional[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
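# Illustrative sketch (not part of the original file): the argument wiring the command
# above registers, shown standalone; roughly equivalent to invoking
# `transformers-cli download --force bert-base-uncased` from a shell.
from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("--cache-dir", type=str, default=None)
parser.add_argument("--force", action="store_true")
parser.add_argument("--trust-remote-code", action="store_true")
parser.add_argument("model", type=str)
args = parser.parse_args(["--force", "bert-base-uncased"])
assert (args.model, args.force, args.cache_dir) == ("bert-base-uncased", True, None)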
| 325
| 0
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCamelCase__ =datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowerCAmelCase__( datasets.BuilderConfig ):
'''simple docstring'''
__snake_case = None
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, ):
import pyspark
def generate_fn():
_SCREAMING_SNAKE_CASE : List[Any] = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
for partition_id in partition_order:
_SCREAMING_SNAKE_CASE : Dict = df_with_partition_id.select("*" ).where(f"""part_id = {partition_id}""" ).drop("part_id" )
_SCREAMING_SNAKE_CASE : List[Any] = partition_df.collect()
_SCREAMING_SNAKE_CASE : List[Any] = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class lowerCAmelCase__( _BaseExamplesIterable ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=None , ) -> Any:
_SCREAMING_SNAKE_CASE : str = df
_SCREAMING_SNAKE_CASE : Tuple = partition_order or range(self.df.rdd.getNumPartitions() )
_SCREAMING_SNAKE_CASE : Optional[int] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ) -> Optional[int]:
yield from self.generate_examples_fn()
def UpperCamelCase_ ( self , __lowerCamelCase ) -> "SparkExamplesIterable":
_SCREAMING_SNAKE_CASE : Optional[int] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__lowerCamelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> "SparkExamplesIterable":
_SCREAMING_SNAKE_CASE : List[str] = self.split_shard_indices_by_worker(__lowerCamelCase , __lowerCamelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCamelCase )
@property
def UpperCamelCase_ ( self ) -> int:
return len(self.partition_order )
class lowerCAmelCase__( datasets.DatasetBuilder ):
'''simple docstring'''
__snake_case = SparkConfig
def __init__( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None , **__lowerCamelCase , ) -> str:
import pyspark
_SCREAMING_SNAKE_CASE : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
_SCREAMING_SNAKE_CASE : List[Any] = df
_SCREAMING_SNAKE_CASE : List[str] = working_dir
super().__init__(
cache_dir=__lowerCamelCase , config_name=str(self.df.semanticHash() ) , **__lowerCamelCase , )
def UpperCamelCase_ ( self ) -> Tuple:
def create_cache_and_write_probe(__lowerCamelCase ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__lowerCamelCase )
            _SCREAMING_SNAKE_CASE : Tuple = os.path.join(self._cache_dir , "fs_test" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__lowerCamelCase , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_SCREAMING_SNAKE_CASE : Optional[int] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__lowerCamelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def UpperCamelCase_ ( self ) -> Dict:
return datasets.DatasetInfo(features=self.config.features )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Dict:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]:
import pyspark
def get_arrow_batch_size(__lowerCamelCase ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
_SCREAMING_SNAKE_CASE : Optional[Any] = self.df.count()
_SCREAMING_SNAKE_CASE : int = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_SCREAMING_SNAKE_CASE : int = (
self.df.limit(__lowerCamelCase )
.repartition(1 )
.mapInArrow(__lowerCamelCase , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_SCREAMING_SNAKE_CASE : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_SCREAMING_SNAKE_CASE : List[Any] = min(__lowerCamelCase , int(approx_total_size / max_shard_size ) )
_SCREAMING_SNAKE_CASE : Tuple = self.df.repartition(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
import pyspark
_SCREAMING_SNAKE_CASE : Optional[int] = ParquetWriter if file_format == 'parquet' else ArrowWriter
_SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self._working_dir , os.path.basename(__lowerCamelCase ) ) if self._working_dir else fpath
_SCREAMING_SNAKE_CASE : str = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_SCREAMING_SNAKE_CASE : List[str] = self.config.features
_SCREAMING_SNAKE_CASE : str = self._writer_batch_size
_SCREAMING_SNAKE_CASE : Tuple = self._fs.storage_options
def write_arrow(__lowerCamelCase ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_SCREAMING_SNAKE_CASE : str = pyspark.TaskContext().taskAttemptId()
_SCREAMING_SNAKE_CASE : Dict = next(__lowerCamelCase , __lowerCamelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
_SCREAMING_SNAKE_CASE : Any = writer_class(
features=__lowerCamelCase , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=__lowerCamelCase , storage_options=__lowerCamelCase , embed_local_files=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = pa.Table.from_batches([first_batch] )
writer.write_table(__lowerCamelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_SCREAMING_SNAKE_CASE : int = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
_SCREAMING_SNAKE_CASE : Optional[int] = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=__lowerCamelCase , storage_options=__lowerCamelCase , embed_local_files=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Any = pa.Table.from_batches([batch] )
writer.write_table(__lowerCamelCase )
if writer._num_bytes > 0:
_SCREAMING_SNAKE_CASE : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__lowerCamelCase ) ):
_SCREAMING_SNAKE_CASE : Any = os.path.join(os.path.dirname(__lowerCamelCase ) , os.path.basename(__lowerCamelCase ) )
shutil.move(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.df.mapInArrow(__lowerCamelCase , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = "arrow" , __lowerCamelCase = None , __lowerCamelCase = None , **__lowerCamelCase , ) -> Any:
self._validate_cache_dir()
_SCREAMING_SNAKE_CASE : List[Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = not is_remote_filesystem(self._fs )
_SCREAMING_SNAKE_CASE : Any = os.path.join if is_local else posixpath.join
_SCREAMING_SNAKE_CASE : List[str] = '-TTTTT-SSSSS-of-NNNNN'
_SCREAMING_SNAKE_CASE : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
_SCREAMING_SNAKE_CASE : Optional[int] = path_join(self._output_dir , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = 0
_SCREAMING_SNAKE_CASE : int = 0
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
_SCREAMING_SNAKE_CASE : Optional[int] = []
_SCREAMING_SNAKE_CASE : Tuple = []
for task_id, content in self._prepare_split_single(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = total_num_examples
_SCREAMING_SNAKE_CASE : List[Any] = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
_SCREAMING_SNAKE_CASE : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_SCREAMING_SNAKE_CASE : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
rename(
__lowerCamelCase , fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
_SCREAMING_SNAKE_CASE : int = 0
for i in range(len(__lowerCamelCase ) ):
_SCREAMING_SNAKE_CASE : List[str] = task_id_and_num_shards[i]
for shard_id in range(__lowerCamelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__lowerCamelCase , len(__lowerCamelCase ) ).map(lambda __lowerCamelCase : _rename_shard(*__lowerCamelCase ) ).collect()
else:
# don't use any pattern
_SCREAMING_SNAKE_CASE : List[Any] = 0
_SCREAMING_SNAKE_CASE : int = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace(__lowerCamelCase , "" ) , )
def UpperCamelCase_ ( self , __lowerCamelCase , ) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df )
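# Illustrative sketch (not part of the original file): how this builder is reached
# through the public API. Requires pyspark and a local Spark session; the schema and
# rows are examples.
from pyspark.sql import SparkSession

from datasets import Dataset

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], schema="text string, label long")
ds = Dataset.from_spark(df)  # materializes the DataFrame into an Arrow-backed dataset
print(ds[0])  # {'text': 'hello', 'label': 0}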
| 368
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCAmelCase__:
'''simple docstring'''
__snake_case = BlenderbotSmallConfig
__snake_case = {}
__snake_case = 'gelu'
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=2_0 , __lowerCamelCase=2 , __lowerCamelCase=1 , __lowerCamelCase=0 , ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : Dict = seq_length
_SCREAMING_SNAKE_CASE : List[str] = is_training
_SCREAMING_SNAKE_CASE : List[str] = use_labels
_SCREAMING_SNAKE_CASE : Dict = vocab_size
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : int = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
_SCREAMING_SNAKE_CASE : Optional[Any] = pad_token_id
_SCREAMING_SNAKE_CASE : List[str] = bos_token_id
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_SCREAMING_SNAKE_CASE : List[Any] = prepare_blenderbot_small_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, inputs_dict
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Any = TFBlenderbotSmallModel(config=__lowerCamelCase ).get_decoder()
_SCREAMING_SNAKE_CASE : Dict = inputs_dict["input_ids"]
_SCREAMING_SNAKE_CASE : List[Any] = input_ids[:1, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict["attention_mask"][:1, :]
_SCREAMING_SNAKE_CASE : List[str] = inputs_dict["head_mask"]
_SCREAMING_SNAKE_CASE : int = 1
# first forward pass
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase , use_cache=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids
_SCREAMING_SNAKE_CASE : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append the new tokens to the input_ids and attention mask
_SCREAMING_SNAKE_CASE : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_SCREAMING_SNAKE_CASE : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_SCREAMING_SNAKE_CASE : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
_SCREAMING_SNAKE_CASE : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1E-3 )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ):
if attention_mask is None:
        _SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(tf.math.not_equal(__lowerCamelCase, config.pad_token_id ), tf.int8 )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.int8 ),
], axis=-1, )
if head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__snake_case = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__snake_case = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFBlenderbotSmallModelTester(self )
_SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase )
@require_tokenizers
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
__snake_case = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__snake_case = 'facebook/blenderbot_small-90M'
@cached_property
def UpperCamelCase_ ( self ) -> List[Any]:
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(self.src_text , return_tensors="tf" )
_SCREAMING_SNAKE_CASE : Dict = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCamelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
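# Illustrative sketch (not part of the original file): the attention-mask construction
# used in prepare_blenderbot_small_inputs_dict above, shown on a tiny batch with
# pad_token_id = 0.
import tensorflow as tf

input_ids = tf.constant([[5, 6, 0], [7, 0, 0]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, 0), tf.int8)
print(attention_mask.numpy())  # [[1 1 0] [1 0 0]]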
| 325
| 0
|
import warnings
from functools import wraps
from typing import Callable
def lowerCamelCase__ (__lowerCamelCase ):
    @wraps(fn )
def _inner_fn(*__lowerCamelCase, **__lowerCamelCase ):
warnings.warn(
(f"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future."""), __UpperCamelCase, )
return fn(*__UpperCamelCase, **__UpperCamelCase )
return _inner_fn
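# Illustrative sketch (not part of the original file): a minimal standalone version of
# the decorator above (named `experimental` upstream in huggingface_hub) together with
# a check of the warning it emits.
import warnings
from functools import wraps


def experimental(fn):
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn


@experimental
def new_api():
    return 42


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert new_api() == 42
    assert "experimental" in str(caught[0].message)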
| 369
|
from math import isqrt, log2
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[Any] = [True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2, max_number, i ):
                _SCREAMING_SNAKE_CASE : Optional[Any] = False
    return [i for i in range(2, max_number ) if is_prime[i]]
def lowerCamelCase__ (__lowerCamelCase = 800800, __lowerCamelCase = 800800 ):
    _SCREAMING_SNAKE_CASE : Optional[int] = degree * log2(base )
    _SCREAMING_SNAKE_CASE : Any = int(upper_bound )
    _SCREAMING_SNAKE_CASE : List[Any] = calculate_prime_numbers(max_prime )
_SCREAMING_SNAKE_CASE : str = 0
_SCREAMING_SNAKE_CASE : int = 0
_SCREAMING_SNAKE_CASE : Dict = len(__lowerCamelCase ) - 1
while left < right:
while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"{solution() = }")
| 325
| 0
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=2 , __lowerCamelCase=9_9 , __lowerCamelCase=0 , __lowerCamelCase=3_2 , __lowerCamelCase=5 , __lowerCamelCase=4 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase="last" , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = parent
_SCREAMING_SNAKE_CASE : str = batch_size
_SCREAMING_SNAKE_CASE : Optional[int] = seq_length
_SCREAMING_SNAKE_CASE : Tuple = is_training
_SCREAMING_SNAKE_CASE : Optional[int] = use_input_lengths
_SCREAMING_SNAKE_CASE : str = use_token_type_ids
_SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
_SCREAMING_SNAKE_CASE : List[str] = gelu_activation
_SCREAMING_SNAKE_CASE : Union[str, Any] = sinusoidal_embeddings
_SCREAMING_SNAKE_CASE : int = causal
_SCREAMING_SNAKE_CASE : Optional[int] = asm
_SCREAMING_SNAKE_CASE : int = n_langs
_SCREAMING_SNAKE_CASE : List[str] = vocab_size
_SCREAMING_SNAKE_CASE : List[str] = n_special
_SCREAMING_SNAKE_CASE : str = hidden_size
_SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size
_SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
_SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
_SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
_SCREAMING_SNAKE_CASE : Any = num_choices
_SCREAMING_SNAKE_CASE : Optional[int] = summary_type
_SCREAMING_SNAKE_CASE : int = use_proj
_SCREAMING_SNAKE_CASE : Dict = scope
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : str = None
if self.use_input_lengths:
_SCREAMING_SNAKE_CASE : Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_SCREAMING_SNAKE_CASE : Optional[Any] = None
_SCREAMING_SNAKE_CASE : List[Any] = None
_SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , 2 ).float()
_SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE : str = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self ) -> Optional[Any]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Any = FlaubertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
_SCREAMING_SNAKE_CASE : str = model(_lowercase , lengths=_lowercase , langs=_lowercase )
_SCREAMING_SNAKE_CASE : int = model(_lowercase , langs=_lowercase )
_SCREAMING_SNAKE_CASE : Tuple = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = FlaubertWithLMHeadModel(_lowercase )
model.to(_lowercase )
model.eval()
_SCREAMING_SNAKE_CASE : Any = model(_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Dict = FlaubertForQuestionAnsweringSimple(_lowercase )
model.to(_lowercase )
model.eval()
_SCREAMING_SNAKE_CASE : int = model(_lowercase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowercase , start_positions=_lowercase , end_positions=_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = FlaubertForQuestionAnswering(_lowercase )
model.to(_lowercase )
model.eval()
_SCREAMING_SNAKE_CASE : Any = model(_lowercase )
_SCREAMING_SNAKE_CASE : Dict = model(
_lowercase , start_positions=_lowercase , end_positions=_lowercase , cls_index=_lowercase , is_impossible=_lowercase , p_mask=_lowercase , )
_SCREAMING_SNAKE_CASE : Dict = model(
_lowercase , start_positions=_lowercase , end_positions=_lowercase , cls_index=_lowercase , is_impossible=_lowercase , )
((_SCREAMING_SNAKE_CASE ) , ) : str = result_with_labels.to_tuple()
_SCREAMING_SNAKE_CASE : Optional[int] = model(_lowercase , start_positions=_lowercase , end_positions=_lowercase )
((_SCREAMING_SNAKE_CASE ) , ) : Tuple = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Dict:
_SCREAMING_SNAKE_CASE : int = FlaubertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
_SCREAMING_SNAKE_CASE : List[str] = model(_lowercase )
_SCREAMING_SNAKE_CASE : str = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.num_labels
_SCREAMING_SNAKE_CASE : Dict = FlaubertForTokenClassification(_lowercase )
model.to(_lowercase )
model.eval()
_SCREAMING_SNAKE_CASE : Tuple = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Any:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_choices
_SCREAMING_SNAKE_CASE : List[Any] = FlaubertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
_SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE : List[Any] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase__( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__snake_case = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def UpperCamelCase_ ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Union[str, Any]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> Optional[Any]:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ) -> None:
        self.model_tester = FlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=3_7 )
def UpperCamelCase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
    def UpperCamelCase_ ( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def UpperCamelCase_ ( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs )
    def UpperCamelCase_ ( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def UpperCamelCase_ ( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def UpperCamelCase_ ( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs )
    def UpperCamelCase_ ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs )
@slow
def UpperCamelCase_ ( self ) -> Any:
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self ) -> int:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "traced_model.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "traced_model.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> str:
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
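# The integration check above follows a common pattern: run a pinned checkpoint on a
# fixed input and compare a small slice of the output against stored reference values
# within a tolerance. A generic helper sketch of that pattern (illustrative, not part
# of the original test):
def _check_output_slice(output , expected_slice , atol=1E-4 ):
    # output: (batch, seq_len, hidden); expected_slice: reference values for output[:, :3, :3]
    return torch.allclose(output[:, :3, :3] , expected_slice , atol=atol )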
| 370
|
from math import factorial
def combinations(n, k ):
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k" )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'If a class of 40 students must be arranged into groups of',
f"4 for group projects, there are {combinations(40, 4)} ways",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f"are {combinations(10, 3)} ways that first, second and",
'third place can be awarded.',
)
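    # Sanity sketch: the factorial formula above should agree with Python's built-in
    # math.comb for valid inputs.
    from math import comb
    assert combinations(52, 5) == comb(52, 5)
    assert combinations(40, 4) == comb(40, 4)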
| 325
| 0
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> List[str]:
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
        model.to(torch_device )
        from datasets import load_dataset
        dataset = load_dataset("nielsr/rvlcdip-demo" )
        image = dataset["train"][0]["image"].convert("RGB" )
        inputs = image_processor(image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 1_6) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
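# A compact sketch of the same single-image classification flow for ad-hoc use. The
# image path below is a hypothetical placeholder; the checkpoint name is reused from
# the test above.
def _dit_classification_sketch(image_path="document.png" ):
    from PIL import Image
    image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
    model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
    inputs = image_processor(Image.open(image_path ).convert("RGB" ) , return_tensors="pt" )
    with torch.no_grad():
        logits = model(**inputs ).logits
    # id2label maps the 16 RVL-CDIP document classes back to their names
    return model.config.id2label[logits.argmax(-1 ).item()]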
| 371
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder( ModelMixin , ConfigMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self , input_dims = 1_2_8 , targets_length = 2_5_6 , max_decoder_noise_time = 2000.0 , d_model = 7_6_8 , num_layers = 1_2 , num_heads = 1_2 , d_kv = 6_4 , d_ff = 2_0_4_8 , dropout_rate = 0.1 , ) -> int:
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask( self , query_input , key_input ) -> List[Any]:
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
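    # Shape sketch for the mask construction above (illustrative): query_input is
    # (batch, query_len) and key_input is (batch, key_len); the two unsqueezes give
    # (batch, query_len, 1) * (batch, 1, key_len) -> (batch, query_len, key_len),
    # and unsqueeze(-3) adds a broadcastable attention-heads axis:
    # (batch, 1, query_len, key_len).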
    def forward( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ) -> Dict:
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
        return spec_out
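# A minimal shape-level usage sketch for the FiLM-conditioned decoder above; the
# values follow the config defaults and are purely illustrative, as is the helper name:
def _film_decoder_shape_sketch():
    decoder = TaFilmDecoder()
    tokens = torch.randn(2 , 2_5_6 , 1_2_8 )  # (batch, targets_length, input_dims)
    noise_t = torch.rand(2 )  # decoder noise times in [0, 1)
    encodings = [(torch.randn(2 , 5_0 , 7_6_8 ), torch.ones(2 , 5_0 ))]  # (encoding, mask) pairs
    spec = decoder(encodings_and_masks=encodings , decoder_input_tokens=tokens , decoder_noise_time=noise_t )
    assert spec.shape == (2, 2_5_6, 1_2_8)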
class DecoderLayer( nn.Module ):
    '''simple docstring'''
    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1E-6 ) -> Dict:
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ) -> Union[str, Any]:
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
    '''simple docstring'''
    def __init__( self , d_model , d_kv , num_heads , dropout_rate ) -> Optional[Any]:
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , ) -> Union[str, Any]:
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attention_output )
        return hidden_states
class TaLayerCrossAttention( nn.Module ):
    '''simple docstring'''
    def __init__( self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ) -> List[str]:
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , key_value_states=None , attention_mask=None , ) -> List[Any]:
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class TaLayerFFCond( nn.Module ):
    '''simple docstring'''
    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon ) -> List[Any]:
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None ) -> List[str]:
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class TaDenseGatedActDense( nn.Module ):
    '''simple docstring'''
    def __init__( self , d_model , d_ff , dropout_rate ) -> Optional[Any]:
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self , hidden_states ) -> Any:
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
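# The block above is the gated-GELU feed-forward used by T5 v1.1: two parallel
# projections, one passed through a tanh-approximated GELU, multiplied elementwise,
# then projected back down. A minimal functional sketch of the same computation:
def _gated_gelu_sketch(x , wi_0 , wi_1 , wo ):
    # x: (..., d_model); wi_0/wi_1: Linear(d_model, d_ff); wo: Linear(d_ff, d_model)
    return wo(nn.functional.gelu(wi_0(x ) , approximate="tanh" ) * wi_1(x ) )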
class TaLayerNorm( nn.Module ):
    '''simple docstring'''
    def __init__( self , hidden_size , eps=1E-6 ) -> int:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def forward( self , hidden_states ) -> Optional[Any]:
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
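# Functional sketch of the RMS-style normalization above: scale by the reciprocal
# root-mean-square of the features, with no mean subtraction and no bias (illustrative):
def _rms_norm_sketch(x , weight , eps=1E-6 ):
    variance = x.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
    return weight * (x * torch.rsqrt(variance + eps )).to(x.dtype )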
class NewGELUActivation( nn.Module ):
    '''simple docstring'''
    def forward( self , input ) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044715 * torch.pow(input , 3.0 )) ))
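# The tanh expression above is the GPT-2-style GELU approximation; PyTorch ships the
# same approximation, so the two should agree closely (a consistency sketch):
def _gelu_tanh_matches(t ):
    return torch.allclose(NewGELUActivation()(t ) , nn.functional.gelu(t , approximate="tanh" ) , atol=1E-6 )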
class TaFiLMLayer( nn.Module ):
    '''simple docstring'''
    def __init__( self , in_features , out_features ) -> Optional[Any]:
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def forward( self , x , conditioning_emb ) -> Dict:
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
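# FiLM (feature-wise linear modulation) predicts a per-feature scale and shift from a
# conditioning vector and applies x * (1 + scale) + shift. A tiny usage sketch with
# illustrative sizes matching the call sites above:
def _film_usage_sketch():
    film = TaFiLMLayer(in_features=7_6_8 * 4 , out_features=7_6_8 )
    x = torch.randn(2 , 1_0 , 7_6_8 )
    conditioning = torch.randn(2 , 1 , 7_6_8 * 4 )  # e.g. the pooled diffusion-time embedding
    return film(x , conditioning )  # same shape as x: (2, 10, 768)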
| 325
| 0
|
from __future__ import annotations
from collections.abc import Iterator
class Node:
    '''simple docstring'''
    def __init__( self , value ) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class lowerCAmelCase__:
    '''simple docstring'''
    def __init__( self , tree ) -> None:
        self.tree = tree
    def depth_first_search( self , node ) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self ) -> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
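    # Usage sketch: a three-node tree; the iterator of the tree-sum class defined
    # above yields the depth-first total of the node values.
    root = Node(1_0 )
    root.left = Node(5 )
    root.right = Node(-3 )
    print(list(lowerCAmelCase__(root ) ) )  # [12]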
| 350
|
def binomial_coefficient(n, r ):
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1 ):
        # to compute current row from previous row.
        j = min(i, r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
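# Sanity sketch: the rolling single-row computation above agrees with math.comb.
from math import comb
assert binomial_coefficient(n=10, r=5) == comb(10, 5) == 252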
| 325
| 0
|