"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
A : str = logging.get_logger(__name__)
class lowerCAmelCase ( snake_case__ ):
'''simple docstring'''
def __init__( self :List[str] , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :List[Any] ) -> None:
"""simple docstring"""
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ ) | 516 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
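# The tool relies on the standard NLI-as-zero-shot trick: each candidate label becomes
# the hypothesis "This example is <label>", BART-MNLI scores it against the text, and
# the label with the highest entailment logit wins. A rough equivalent via the public
# pipeline API (illustrative sketch, not part of this module):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("I love this movie", candidate_labels=["positive", "negative"])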
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
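# Note: the `_LazyModule` indirection defers the heavy torch/speech-backed imports
# until one of the listed attributes is actually accessed; `_import_structure` maps
# each submodule to the public names it provides.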
def perfect_cube(n: int) -> bool:
    """Return True if and only if `n` is a perfect cube."""
    val = round(n ** (1 / 3))
    return (val * val * val) == n
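# Note: the round() above is what makes exact cubes work at all; floating-point cube
# roots are inexact, e.g. 27 ** (1 / 3) evaluates to 3.0000000000000004.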
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
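# A minimal usage sketch (hypothetical values; `FalconModel` is the matching model
# class in the library):
#
#   from transformers import FalconConfig, FalconModel
#   config = FalconConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=4)
#   model = FalconModel(config)  # randomly initialised; falcon-7b-style defaults otherwise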
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of `nums`."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
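# Worked examples (illustrative, not original doctests):
#   maximum_non_adjacent_sum([1, 2, 3]) == 4        # picks 1 + 3
#   maximum_non_adjacent_sum([3, 2, 7, 10]) == 13   # picks 3 + 10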
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
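# Expected --correct_filename format: one semicolon-separated record per line, i.e.
#   <file>;<class name>;<test name>;<correct line>
# e.g. (hypothetical): tests/test_foo.py;FooModelTest;test_inference;expected_slice = torch.tensor([...])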
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation with the variance-exploding score SDE sampler."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        num_inference_steps=2000,
        generator=None,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
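# Illustrative usage (a sketch; assumes a trained score-SDE checkpoint such as
# "google/ncsnpp-celebahq-256" is available):
#
#   from diffusers import ScoreSdeVePipeline
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]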
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
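# Note: `get_expected_values` mirrors the processor's shortest-edge resize rule
# (scale the shorter side to size["shortest_edge"], preserve aspect ratio) and, for
# batched inputs, returns the per-batch maximum height/width that padding targets.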
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given decimal precision using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
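# Each term of the Chudnovsky series contributes roughly 14 additional correct digits
# (log10(151931373056000) ≈ 14.18), which is why `num_iterations` above is
# ceil(precision / 14).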
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 50
print(F"The first {n} digits of pi is: {pi(n)}")
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    # Instantiate the authors' model with the pre-trained weights (the boolean flags
    # below are the conversion-time settings this script assumes)
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
    )
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether top-k / top-p (nucleus) filtering behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cummulative prob of 5 highest values <= 0.6
        ], dtype=tf.float32)
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
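    # Note: tf_top_k_top_p_filtering keeps, per row, the top_k highest logits and the
    # smallest set whose cumulative softmax probability exceeds top_p (at least
    # min_tokens_to_keep of them), setting every other logit to -inf; the markers in
    # the tensor above flag the five surviving values per row.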
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the monitored validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ : Union[str, Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ : List[Any] = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], 
[4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
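# Hedged usage sketch of the behaviour exercised above (assumes Tesseract is
# installed for the OCR branch):
#   processor = LayoutLMvaImageProcessor()                 # apply_ocr=True by default
#   enc = processor(image, return_tensors="pt")            # pixel_values + words + boxes
#   processor = LayoutLMvaImageProcessor(apply_ocr=False)
#   enc = processor(image, return_tensors="pt")            # pixel_values only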
| 570 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")

OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
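# Usage sketch of the custom flag registered above: a doctest can opt out of
# output comparison by adding the option inline, e.g.
#   >>> import random
#   >>> random.random()  # doctest: +IGNORE_RESULT
# `CustomOutputChecker.check_output` then returns True without comparing output.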
| 292 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 713 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__(self, vocab_size=30522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2, d_model=768, n_head=12, d_head=64, d_inner=3072, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, initializer_range=0.1, initializer_std=None, layer_norm_eps=1e-9, pooling_type="mean", attention_type="relative_shift", separate_cls=True, truncate_seq=True, pool_q_only=True, **kwargs):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 399 | 0 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
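# Quick examples of `format_time` (hours are omitted when zero):
#   format_time(65)   -> "01:05"
#   format_time(3725) -> "1:02:05"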
def html_progress_bar(value, total, prefix, label, width=300):
# docstyle-ignore
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "  </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
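# Example: text_to_html_table([["Step", "Loss"], [10, 0.25]]) renders a
# two-column HTML table; float cells are formatted to six decimal places
# ("0.250000"), everything else through str().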
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(self, total: int, prefix: Optional[str] = None, leave: bool = True, parent: Optional["NotebookTrainingTracker"] = None, width: int = 300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
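# Usage note (a sketch; the Trainer normally installs this callback by itself
# when it detects a notebook environment):
#   trainer = Trainer(..., callbacks=[NotebookProgressCallback()])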
| 143 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] ,)
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def __UpperCamelCase ( self : List[Any] ):
# fmt: off
SCREAMING_SNAKE_CASE:Any = {"input_ids": [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ ,model_name="google/bert_for_seq_generation_L-24_bbc_encoder" ,revision="c817d1fd1be2ffa69431227a1fe320544943d4db" ,)
| 143 | 1 |
"""simple docstring"""
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__SCREAMING_SNAKE_CASE = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f'{src_lang} is not a supported language.')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'{tgt_lang} is not a supported language.')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='pt', src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
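# Hedged usage sketch: the tool maps plain-English names to NLLB codes via the
# table above (English -> eng_Latn, French -> fra_Latn) before generating.
#
#   translator = TranslationTool()
#   translator("How are you?", src_lang="English", tgt_lang="French")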
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
| 395 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 5_1_2,
"roberta-large": 5_1_2,
"roberta-large-mnli": 5_1_2,
"distilroberta-base": 5_1_2,
"roberta-base-openai-detector": 5_1_2,
"roberta-large-openai-detector": 5_1_2,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" RoBERTa tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
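# Worked example of the two methods above: RoBERTa does not use token type
# ids, so the mask is all zeros whatever the inputs, e.g. for a pair
# (token_ids_0=[10, 11], token_ids_1=[12]):
#   build_inputs_with_special_tokens      -> [<s>, 10, 11, </s>, </s>, 12, </s>]
#   create_token_type_ids_from_sequences  -> [0, 0, 0, 0, 0, 0, 0]  (length 7)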
| 638 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase__ = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
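
# A self-contained sketch of the lazy-import pattern used above, written with plain
# importlib rather than the real `_LazyModule` class (whose internals differ): each
# exported symbol is mapped back to its submodule, and the submodule is only
# imported the first time one of its symbols is accessed.
import importlib
from types import ModuleType


class SimpleLazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, name):
        if name in self._symbol_to_module:
            module = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
            return getattr(module, name)
        raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")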
| 638 | 1 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 719 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
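
# A hypothetical programmatic invocation of the parser above (the yaml path is a
# placeholder; any accelerate config file would do):
#
#   parser = test_command_parser()
#   args = parser.parse_args(["--config_file", "default_config.yaml"])
#   test_command(args)  # runs test_script.py through `accelerate-launch`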
| 225 | 0 |
"""simple docstring"""
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
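
# The interactive session documented above, reproduced programmatically:
INF = float("inf")
graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],
    [INF, 1.0, 0.0],
]
dist, _ = floyd_warshall(graph, 3)
assert dist[1][2] == 2.0 and dist[2][1] == 1.0  # the two direct edges
assert dist[0][1] == INF                        # vertex 0 remains unreachable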
| 560 |
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
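
# For context, a minimal sketch of the kind of `sqlite_path` fixture the tests above
# assume: a sqlite file with a table named `dataset` holding the three checked
# columns and four rows (the real fixture lives elsewhere in the test suite).
def make_sqlite_fixture(path):
    with contextlib.closing(sqlite3.connect(path)) as con:
        con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
        con.executemany(
            "INSERT INTO dataset VALUES (?, ?, ?)",
            [(str(i), i, float(i)) for i in range(4)],
        )
        con.commit()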
| 560 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
A_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
A_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _UpperCamelCase ( __UpperCamelCase ) -> List[str]:
lowerCamelCase_ = {}
with open(__lowerCAmelCase ,'r' ) as file:
for line_number, line in enumerate(__lowerCAmelCase ):
lowerCamelCase_ = line.strip()
if line:
lowerCamelCase_ = line.split()
lowerCamelCase_ = line_number
lowerCamelCase_ = words[0]
lowerCamelCase_ = value
return result
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
for attribute in key.split('.' ):
lowerCamelCase_ = getattr(__lowerCAmelCase ,__lowerCAmelCase )
lowerCamelCase_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCAmelCase ):
lowerCamelCase_ = PARAM_MAPPING[full_name.split('.' )[-1]]
lowerCamelCase_ = 'param'
if weight_type is not None and weight_type != "param":
lowerCamelCase_ = getattr(__lowerCAmelCase ,__lowerCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
lowerCamelCase_ = hf_pointer
for attribute in hf_param_name.split('.' ):
lowerCamelCase_ = getattr(__lowerCAmelCase ,__lowerCAmelCase )
lowerCamelCase_ = shape_pointer.shape
# let's reduce dimension
lowerCamelCase_ = value[0]
else:
lowerCamelCase_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCamelCase_ = value
elif weight_type == "weight_g":
lowerCamelCase_ = value
elif weight_type == "weight_v":
lowerCamelCase_ = value
elif weight_type == "bias":
lowerCamelCase_ = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
lowerCamelCase_ = getattr(__lowerCAmelCase ,__lowerCAmelCase )
lowerCamelCase_ = value
else:
lowerCamelCase_ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
lowerCamelCase_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCAmelCase ):
lowerCamelCase_ = PARAM_MAPPING[full_name.split('.' )[-1]]
lowerCamelCase_ = 'param'
if weight_type is not None and weight_type != "param":
lowerCamelCase_ = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
lowerCamelCase_ = '.'.join([key, hf_param_name] )
else:
lowerCamelCase_ = key
lowerCamelCase_ = value if 'lm_head' in full_key else value[0]
A_ = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> Dict:
lowerCamelCase_ = False
for key, mapped_key in MAPPING.items():
lowerCamelCase_ = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCamelCase_ = True
if "*" in mapped_key:
lowerCamelCase_ = name.split(__lowerCAmelCase )[0].split('.' )[-2]
lowerCamelCase_ = mapped_key.replace('*' ,__lowerCAmelCase )
if "weight_g" in name:
lowerCamelCase_ = 'weight_g'
elif "weight_v" in name:
lowerCamelCase_ = 'weight_v'
elif "bias" in name:
lowerCamelCase_ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase_ = 'weight'
else:
lowerCamelCase_ = None
if hf_dict is not None:
rename_dict(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
else:
set_recursively(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
return is_used
return is_used
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
lowerCamelCase_ = []
lowerCamelCase_ = fairseq_model.state_dict()
lowerCamelCase_ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase_ = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,hf_model.config.feat_extract_norm == 'group' ,)
lowerCamelCase_ = True
else:
lowerCamelCase_ = load_wavaveca_layer(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]:
lowerCamelCase_ = full_name.split('conv_layers.' )[-1]
lowerCamelCase_ = name.split('.' )
lowerCamelCase_ = int(items[0] )
lowerCamelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowerCamelCase_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowerCamelCase_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowerCamelCase_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowerCamelCase_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase=False ) -> Optional[Any]:
if config_path is not None:
lowerCamelCase_ = WavaVecaConfig.from_pretrained(__lowerCAmelCase )
else:
lowerCamelCase_ = WavaVecaConfig()
if is_seq_class:
lowerCamelCase_ = read_txt_into_dict(__lowerCAmelCase )
lowerCamelCase_ = idalabel
lowerCamelCase_ = WavaVecaForSequenceClassification(__lowerCAmelCase )
lowerCamelCase_ = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_60_00 ,padding_value=0 ,do_normalize=__lowerCAmelCase ,return_attention_mask=__lowerCAmelCase ,)
feature_extractor.save_pretrained(__lowerCAmelCase )
elif is_finetuned:
if dict_path:
lowerCamelCase_ = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase_ = target_dict.pad_index
lowerCamelCase_ = target_dict.bos_index
lowerCamelCase_ = target_dict.eos_index
lowerCamelCase_ = len(target_dict.symbols )
lowerCamelCase_ = os.path.join(__lowerCAmelCase ,'vocab.json' )
if not os.path.isdir(__lowerCAmelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase )
lowerCamelCase_ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase_ = 0
lowerCamelCase_ = 1
with open(__lowerCAmelCase ,'w' ,encoding='utf-8' ) as vocab_handle:
json.dump(__lowerCAmelCase ,__lowerCAmelCase )
lowerCamelCase_ = WavaVecaCTCTokenizer(
__lowerCAmelCase ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='|' ,do_lower_case=__lowerCAmelCase ,)
lowerCamelCase_ = True if config.feat_extract_norm == 'layer' else False
lowerCamelCase_ = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_60_00 ,padding_value=0 ,do_normalize=__lowerCAmelCase ,return_attention_mask=__lowerCAmelCase ,)
lowerCamelCase_ = WavaVecaProcessor(feature_extractor=__lowerCAmelCase ,tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
lowerCamelCase_ = WavaVecaForCTC(__lowerCAmelCase )
else:
lowerCamelCase_ = WavaVecaForPreTraining(__lowerCAmelCase )
if is_finetuned or is_seq_class:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
lowerCamelCase_ = argparse.Namespace(task='audio_pretraining' )
lowerCamelCase_ = fairseq.tasks.setup_task(__lowerCAmelCase )
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=__lowerCAmelCase )
lowerCamelCase_ = model[0].eval()
recursively_load_weights(__lowerCAmelCase ,__lowerCAmelCase ,not is_finetuned )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
A_ = parser.parse_args()
A_ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
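
# Hypothetical command line for the script above (the script filename and all
# paths are placeholders; the flags are the ones declared in the parser):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned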
| 709 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = MaskGenerationPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
pass
@slow
@require_torch
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_967},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_909},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_879},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_834},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_716},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_612},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_552},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_532},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_499},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_483},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_408},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_335},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_326},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_262},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_986},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_984},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_873},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_210},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_053},
] , )
| 384 | 0 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = """
---

**Disclaimer**

*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 52 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
lowercase : Any = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
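
# Quick check of the helper above: with the default scale_factor=8, a 768x768 image
# maps to 768 // 64 = 12 blocks per side, i.e. a 96x96 latent; 770 is not a multiple
# of 64, so it is rounded up to 13 * 8 = 104.
assert get_new_h_w(768, 768) == (96, 96)
assert get_new_h_w(770, 768) == (104, 96)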
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def lowerCAmelCase ( self : List[str] , A_ : Union[str, Any] , A_ : Optional[Any] , A_ : int , A_ : List[str] , A_ : Any=None , ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_: int = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
lowerCamelCase_: Dict = self.tokenizer(
A_ , padding="""max_length""" , truncation=A_ , max_length=77 , return_attention_mask=A_ , add_special_tokens=A_ , return_tensors="""pt""" , )
lowerCamelCase_: Union[str, Any] = text_inputs.input_ids
lowerCamelCase_: List[Any] = self.tokenizer(A_ , padding="""longest""" , return_tensors="""pt""" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(A_ , A_ ):
lowerCamelCase_: Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowerCamelCase_: List[Any] = text_input_ids.to(A_ )
lowerCamelCase_: Dict = text_inputs.attention_mask.to(A_ )
lowerCamelCase_ , lowerCamelCase_: Dict = self.text_encoder(
input_ids=A_ , attention_mask=A_ )
lowerCamelCase_: Any = prompt_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_: Optional[Any] = text_encoder_hidden_states.repeat_interleave(A_ , dim=0 )
lowerCamelCase_: Tuple = text_mask.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_: List[str]
if negative_prompt is None:
lowerCamelCase_: Dict = [""""""] * batch_size
elif type(A_ ) is not type(A_ ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(A_ )} !="""
f""" {type(A_ )}.""" )
elif isinstance(A_ , A_ ):
lowerCamelCase_: Optional[Any] = [negative_prompt]
elif batch_size != len(A_ ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(A_ )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
lowerCamelCase_: Tuple = negative_prompt
lowerCamelCase_: Tuple = self.tokenizer(
A_ , padding="""max_length""" , max_length=77 , truncation=A_ , return_attention_mask=A_ , add_special_tokens=A_ , return_tensors="""pt""" , )
lowerCamelCase_: Tuple = uncond_input.input_ids.to(A_ )
lowerCamelCase_: Union[str, Any] = uncond_input.attention_mask.to(A_ )
lowerCamelCase_ , lowerCamelCase_: int = self.text_encoder(
input_ids=A_ , attention_mask=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_: List[str] = negative_prompt_embeds.shape[1]
lowerCamelCase_: Any = negative_prompt_embeds.repeat(1 , A_ )
lowerCamelCase_: Tuple = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ )
lowerCamelCase_: str = uncond_text_encoder_hidden_states.shape[1]
lowerCamelCase_: List[Any] = uncond_text_encoder_hidden_states.repeat(1 , A_ , 1 )
lowerCamelCase_: Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , A_ , -1 )
lowerCamelCase_: Tuple = uncond_text_mask.repeat_interleave(A_ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_: List[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowerCamelCase_: int = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowerCamelCase_: Union[str, Any] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase ( self : str , A_ : Dict=0 ) -> List[str]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowerCamelCase_: Any = torch.device(f"""cuda:{gpu_id}""" )
lowerCamelCase_: List[str] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
def lowerCAmelCase ( self : List[Any] , A_ : Union[str, Any]=0 ) -> Optional[Any]:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
lowerCamelCase_: Any = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=A_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase_: int = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowerCamelCase_ , lowerCamelCase_: Dict = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
if self.safety_checker is not None:
lowerCamelCase_ , lowerCamelCase_: int = cpu_offload_with_hook(self.safety_checker , A_ , prev_module_hook=A_ )
# We'll offload the last model manually.
lowerCamelCase_: Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self : Optional[Any] , A_ : Union[str, List[str]] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : Optional[Union[str, List[str]]] = None , A_ : int = 5_12 , A_ : int = 5_12 , A_ : int = 1_00 , A_ : float = 4.0 , A_ : int = 1 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[str] = "pil" , A_ : bool = True , ) -> Any:
"""simple docstring"""
if isinstance(A_ , A_ ):
lowerCamelCase_: List[str] = 1
elif isinstance(A_ , A_ ):
lowerCamelCase_: int = len(A_ )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(A_ )}""" )
lowerCamelCase_: int = self._execution_device
lowerCamelCase_: Optional[int] = batch_size * num_images_per_prompt
lowerCamelCase_: Optional[int] = guidance_scale > 1.0
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_: int = self._encode_prompt(
A_ , A_ , A_ , A_ , A_ )
if isinstance(A_ , A_ ):
lowerCamelCase_: int = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
lowerCamelCase_: Tuple = torch.cat(A_ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_: List[str] = image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_: Optional[int] = negative_image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_: List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
lowerCamelCase_: List[str] = self.scheduler.timesteps
lowerCamelCase_: Union[str, Any] = self.unet.config.in_channels
lowerCamelCase_ , lowerCamelCase_: List[str] = get_new_h_w(A_ , A_ , self.movq_scale_factor )
# create initial latent
lowerCamelCase_: List[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_: List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_: Tuple = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
lowerCamelCase_: Optional[int] = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_: Any = noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase_ , lowerCamelCase_: Any = noise_pred.chunk(2 )
lowerCamelCase_ , lowerCamelCase_: str = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , ).prev_sample
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 423 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
# Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['''model_config'''] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''module''']
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab['''[MASK2]'''] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('''<ent>''' , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken('''<ent2>''' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_1, entity_token_2]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , '''tokenizer_config.json''' ) , '''r''' ) as f:
        tokenizer_config = json.load(f )
    tokenizer_config['''tokenizer_class'''] = '''MLukeTokenizer'''
    with open(os.path.join(pytorch_dump_folder_path , '''tokenizer_config.json''' ) , '''w''' ) as f:
        json.dump(tokenizer_config , f )
    with open(os.path.join(pytorch_dump_folder_path , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
    word_emb = state_dict['''embeddings.word_embeddings.weight''']
    ent_emb = word_emb[ent_init_index].unsqueeze(0 )
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0 )
    state_dict['''embeddings.word_embeddings.weight'''] = torch.cat([word_emb, ent_emb, ent2_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0 )
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0 )
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
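    # LUKE's entity-aware self-attention adds three extra query projections (word-to-entity,
    # entity-to-word, entity-to-entity); all of them start as copies of the word-to-word query.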
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + matrix_name.replace("query" , "w2e_query" )] = state_dict[prefix + matrix_name]
            state_dict[prefix + matrix_name.replace("query" , "e2w_query" )] = state_dict[prefix + matrix_name]
            state_dict[prefix + matrix_name.replace("query" , "e2e_query" )] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['''entity_embeddings.entity_embeddings.weight''']
    entity_mask_emb = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
    state_dict['''entity_embeddings.entity_embeddings.weight'''] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['''entity_predictions.bias''']
    entity_mask_bias = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
    state_dict['''entity_predictions.bias'''] = torch.cat([entity_prediction_bias, entity_mask_bias] )
    model = LukeForMaskedLM(config=config ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
            # backbone weights live under the `luke.` prefix in the converted checkpoint
            new_state_dict['''luke.''' + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    if set(unexpected_keys ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
    if set(missing_keys ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='''entity_classification''' )
    text = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
    span = (0, 9)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='''pt''' )
    outputs = model(**encoding )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 33, 7_68) )
        expected_slice = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
        if not (outputs.last_hidden_state.shape == expected_shape):
            raise ValueError(
                f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
        if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 1, 7_68) )
        expected_slice = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
        if not (outputs.entity_last_hidden_state.shape == expected_shape):
            raise ValueError(
                f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
                f" {expected_shape}" )
        if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    text = '''Tokyo is the capital of <mask>.'''
    span = (24, 30)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='''pt''' )
    outputs = model(**encoding )
    input_ids = encoding['''input_ids'''][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(predicted_id )
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_original_entity_vocab( entity_vocab_path ):
    SPECIAL_TOKENS = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry['''id''']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 346 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class UpperCAmelCase ( PretrainedConfig ):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__( self , vocab_size=3_2000 , hidden_size=4096 , intermediate_size=1_1008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
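# A minimal usage sketch (not part of the original file; class and field names follow the
# reconstruction above): the validation rejects malformed `rope_scaling` dicts, e.g.
#   UpperCAmelCase(rope_scaling={"type": "linear", "factor": 2.0})  # passes
#   UpperCAmelCase(rope_scaling={"type": "cubic", "factor": 2.0})   # raises ValueError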
| 346 | 1 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object
    def Body(*lowercase__ , **lowercase__ ):
        pass
    _serve_dependencies_installed = False
logger = logging.get_logger("""transformers-cli/serving""")
def serve_command_factory( args ):
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(nlp , args.host , args.port , args.workers )
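# The `transformers-cli` framework calls `args.func(args)`, so registering
# `serve_command_factory` below via `set_defaults` builds the pipeline lazily at invocation time.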
class ServeModelInfoResult ( BaseModel ):
    '''simple docstring'''
    infos : dict
class ServeTokenizeResult ( BaseModel ):
    '''simple docstring'''
    tokens : List[str]
    tokens_ids : Optional[List[int]]
class ServeDeTokenizeResult ( BaseModel ):
    '''simple docstring'''
    text : str
class ServeForwardResult ( BaseModel ):
    '''simple docstring'''
    output : Any
class ServeCommand ( BaseTransformersCLICommand ):
    '''simple docstring'''
@staticmethod
    def register_subcommand( parser : ArgumentParser ):
        '''simple docstring'''
        serve_parser = parser.add_parser(
            "serve" , help="CLI tool to run inference requests through REST and GraphQL endpoints." )
        serve_parser.add_argument(
            "--task" , type=str , choices=get_supported_tasks() , help="The task to run the pipeline on" , )
        serve_parser.add_argument("--host" , type=str , default="localhost" , help="Interface the server will listen on." )
        serve_parser.add_argument("--port" , type=int , default=88_88 , help="Port the serving will listen to." )
        serve_parser.add_argument("--workers" , type=int , default=1 , help="Number of http workers" )
        serve_parser.add_argument("--model" , type=str , help="Model's name or path to stored model." )
        serve_parser.add_argument("--config" , type=str , help="Model's config name or path to stored model." )
        serve_parser.add_argument("--tokenizer" , type=str , help="Tokenizer name to use." )
        serve_parser.add_argument(
            "--device" , type=int , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
        serve_parser.set_defaults(func=serve_command_factory )
    def __init__( self , pipeline : Pipeline , host : str , port : int , workers : int ):
        '''simple docstring'''
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and uvicorn. "
"Please install transformers with [serving]: pip install \"transformers[serving]\"."
"Or install FastAPI and uvicorn separately." )
else:
logger.info(F"""Serving model over {host}:{port}""" )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/" , self.model_info , response_model=ServeModelInfoResult , response_class=JSONResponse , methods=["GET"] , ),
                    APIRoute(
                        "/tokenize" , self.tokenize , response_model=ServeTokenizeResult , response_class=JSONResponse , methods=["POST"] , ),
                    APIRoute(
                        "/detokenize" , self.detokenize , response_model=ServeDeTokenizeResult , response_class=JSONResponse , methods=["POST"] , ),
                    APIRoute(
                        "/forward" , self.forward , response_model=ServeForwardResult , response_class=JSONResponse , methods=["POST"] , ),
                ] , timeout=6_00 , )
    def run( self ):
        '''simple docstring'''
        run(self._app , host=self.host , port=self.port , workers=self.workers )
    def model_info( self ):
        '''simple docstring'''
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
    def tokenize( self , text_input : str = Body(None , embed=True ) , return_ids : bool = Body(False , embed=True ) ):
        '''simple docstring'''
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input )
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
                return ServeTokenizeResult(tokens=tokens_txt , tokens_ids=tokens_ids )
            else:
                return ServeTokenizeResult(tokens=tokens_txt )
        except Exception as e:
            raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(e )} )
    def detokenize( self , tokens_ids : List[int] = Body(None , embed=True ) , skip_special_tokens : bool = Body(False , embed=True ) , cleanup_tokenization_spaces : bool = Body(True , embed=True ) , ):
        '''simple docstring'''
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids , skip_special_tokens , cleanup_tokenization_spaces )
            return ServeDeTokenizeResult(model="" , text=decoded_str )
        except Exception as e:
            raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(e )} )
    async def forward( self , inputs=Body(None , embed=True ) ):
        '''simple docstring'''
        if len(inputs ) == 0:
            return ServeForwardResult(output=[] , attention=[] )
        try:
            # Forward through the model
            output = self._pipeline(inputs )
            return ServeForwardResult(output=output )
        except Exception as e:
            raise HTTPException(5_00 , {"error": str(e )} )
| 199 |
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class UpperCAmelCase_ ( A ):
'''simple docstring'''
    model_input_names = ["input_values", "attention_mask"]
    def __init__( self , feature_size : int = 1 , sampling_rate : int = 1_60_00 , padding_value : float = 0.0 , do_normalize : bool = False , num_mel_bins : int = 80 , hop_length : int = 16 , win_length : int = 64 , win_function : str = "hann_window" , frame_signal_scale : float = 1.0 , fmin : float = 80 , fmax : float = 76_00 , mel_floor : float = 1e-10 , reduction_factor : int = 2 , return_attention_mask : bool = True , **kwargs , ):
        '''simple docstring'''
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 10_00
        self.sample_stride = hop_length * sampling_rate // 10_00
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size , name=self.win_function , periodic=True )
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , FutureWarning , )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , FutureWarning , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
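    # Per-utterance normalization: each waveform is shifted/scaled to zero mean and unit
    # variance using statistics from its non-padded region only; padded positions are reset.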
    def zero_mean_unit_var_norm( input_values : List[np.ndarray] , attention_mask : List[np.ndarray] , padding_value : float = 0.0 ):
        '''simple docstring'''
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def _extract_mel_features( self , one_waveform : np.ndarray , ):
        '''simple docstring'''
        log_mel_spec = spectrogram(
            one_waveform , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
        return log_mel_spec.T
    def __call__( self , audio : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , audio_target : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , padding : Union[bool, str, PaddingStrategy] = False , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_tensors : Optional[Union[str, TensorType]] = None , sampling_rate : Optional[int] = None , **kwargs , ):
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
        if audio is not None:
            inputs = self._process_audio(
                audio , False , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target , True , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask" )
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def _process_audio( self , speech , is_target = False , padding = False , max_length = None , truncation = False , pad_to_multiple_of = None , return_attention_mask = None , return_tensors = None , **kwargs , ):
        '''simple docstring'''
        is_batched_numpy = isinstance(speech , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(speech , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            speech = [np.asarray(s , dtype=np.float32 ) for s in speech]
        elif not is_batched and not isinstance(speech , np.ndarray ):
            speech = np.asarray(speech , dtype=np.float32 )
        elif isinstance(speech , np.ndarray ) and speech.dtype is np.dtype(np.float64 ):
            speech = speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform ) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features} )
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0] , np.ndarray ):
            padded_inputs["input_values"] = [np.asarray(array , dtype=np.float32 ) for array in input_values]
        elif (
            not isinstance(input_values , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.float64 )
        ):
            padded_inputs["input_values"] = [array.astype(np.float32 ) for array in input_values]
        elif isinstance(input_values , np.ndarray ) and input_values.dtype is np.dtype(np.float64 ):
            padded_inputs["input_values"] = input_values.astype(np.float32 )
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"] , attention_mask=attention_mask , padding_value=self.padding_value )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
    def to_dict( self ):
        '''simple docstring'''
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
| 199 | 1 |
'''simple docstring'''
def multiplication_table( number: int , number_of_terms: int ) -> str:
return "\n".join(
F"""{number} * {i} = {number * i}""" for i in range(1 ,number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 709 |
'''simple docstring'''
def selection_sort( collection: list ) -> list:
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
    return collection
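# Selection sort performs O(n^2) comparisons but at most n - 1 swaps, sorts in place,
# and is not stable (equal elements may be reordered).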
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 238 | 0 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name , checkpoint_name , dump_path , force_download ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + '''Fast''' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = checkpoint.split('''/''' )
UpperCAmelCase__ : List[Any] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
elif add_prefix:
UpperCAmelCase__ : Tuple = checkpoint
UpperCAmelCase__ : Any = dump_path
else:
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Dict = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
UpperCAmelCase__ : Union[str, Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
UpperCAmelCase__ : Optional[int] = file_path.split(lowerCAmelCase__ )[-1][0]
if next_char == "/":
UpperCAmelCase__ : int = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
UpperCAmelCase__ : Optional[Any] = tokenizer.save_pretrained(
lowerCAmelCase__ , legacy_format=lowerCAmelCase__ , filename_prefix=lowerCAmelCase__ )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('''tokenizer.json''' ):
os.remove(lowerCAmelCase__ )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 75 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
    def setUp( self ):
'''simple docstring'''
if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=True , )
assert hasattr(self , '''env''' )
    def create_estimator( self , instance_count ):
        '''simple docstring'''
        job_name = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
        # distributed data settings
        distribution = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='''py36''' , )
    def save_results_as_csv( self , job_name ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
    def test_script( self , instance_count ):
        '''simple docstring'''
        estimator = self.create_estimator(instance_count )
# run training
estimator.fit()
# result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile )
| 75 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_ernie"""] = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 486 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        image_processor_map = {
"""do_resize""": True,
"""size""": {"""height""": 2_24, """width""": 2_24},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
"""do_convert_rgb""": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **__lowercase ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
    def get_rust_tokenizer( self , **__lowercase ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase )
    def get_image_processor( self , **__lowercase ):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowercase )
    def tearDown( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ChineseCLIPImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ChineseCLIPImageProcessor )
    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 486 | 1 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_ffmpeg_warned , _librosa_warned , _audioread_warned = False, False, False
@dataclass
class Audio :
"""simple docstring"""
    sampling_rate : Optional[int] = None
    mono : bool = True
    decode : bool = True
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    _type : str = field(default='''Audio''' , init=False , repr=False )
def __call__( self ) -> Optional[Any]:
return self.pa_type
    def encode_example( self , value ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
        if isinstance(value , str ):
            return {"bytes": None, "path": value}
        elif isinstance(value , bytes ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
            return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
                if value.get("""bytes""" ):
                    # If we already had PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["""bytes"""] , dtype=np.int16 ).astype(np.float32 ) / 32_767
                else:
                    bytes_value = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.float32 ) / 32_767
                buffer = BytesIO(bytes() )
                sf.write(buffer , bytes_value , value["""sampling_rate"""] , format="""wav""" )
                return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
    def decode_example( self , value , token_per_repo_id = None ) -> dict:
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
        path , file = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
        audio_format = xsplitext(path )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("""::""" )[-1]
            try:
                repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["""repo_id"""]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path , """rb""" , use_auth_token=use_auth_token ) as f:
                array , sampling_rate = sf.read(f )
        else:
            array , sampling_rate = sf.read(file )
        array = array.T
        if self.mono:
            array = librosa.to_mono(array )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate )
            sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
    def cast_storage( self , storage ) -> pa.StructArray:
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
            storage = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("""bytes""" ) >= 0:
                bytes_array = storage.field("""bytes""" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("""path""" ) >= 0:
                path_array = storage.field("""path""" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self , storage ) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , """rb""" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
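# A round-trip sketch (not from the original file; assumes `soundfile` is installed and
# numpy is imported as np):
#   feat = Audio(sampling_rate=16_000)
#   enc = feat.encode_example({"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000})
#   dec = feat.decode_example(enc)  # -> {"path": None, "array": ..., "sampling_rate": 16_000}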
| 135 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
"""simple docstring"""
A__ : Union[str, Any] = (DDIMParallelScheduler,)
A__ : Optional[Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50))
    def get_scheduler_config( self , **kwargs ):
        config = {
            """num_train_timesteps""": 10_00,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """clip_sample""": True,
        }
        config.update(**kwargs )
        return config
    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
    def test_timesteps( self ):
        for timesteps in [1_00, 5_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_timestep_spacing( self ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
    def test_rescale_betas_zero_snr( self ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_time_indices( self ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps )
    def test_eta( self ):
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t , eta=eta )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
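        # DDIM posterior variance: sigma_t^2 = (1 - alpha_bar_prev) / (1 - alpha_bar_t)
        # * (1 - alpha_bar_t / alpha_bar_prev); the assertions below spot-check (t, prev_t) pairs.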
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.14771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.32460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
    def test_batch_step_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        sample_1 = self.dummy_sample_deter
        sample_2 = self.dummy_sample_deter + 0.1
        sample_3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample_1.shape[0]
        samples = torch.stack([sample_1, sample_2, sample_3] , dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 1147.7904 ) < 1e-2
        assert abs(result_mean.item() - 0.4982 ) < 1e-3
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 172.0067 ) < 1e-2
        assert abs(result_mean.item() - 0.223967 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type="""v_prediction""" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.5302 ) < 1e-2
        assert abs(result_mean.item() - 0.0684 ) < 1e-3
    def test_full_loop_with_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.8295 ) < 1e-2
        assert abs(result_mean.item() - 0.1951 ) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.0784 ) < 1e-2
        assert abs(result_mean.item() - 0.1941 ) < 1e-3
| 135 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class __lowercase( TaskTemplate ):
'''simple docstring'''
    task : str = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema : ClassVar[Features] = Features({'question': Value('string' ), 'context': Value('string' )} )
    label_schema : ClassVar[Features] = Features(
        {
            'answers': Sequence(
                {
                    'text': Value('string' ),
                    'answer_start': Value('int32' ),
                } )
        } )
    question_column : str = "question"
    context_column : str = "context"
    answers_column : str = "answers"
@property
def snake_case_ ( self ):
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 263 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
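
# Illustrative sanity check for `drop_path` (added for clarity; not part of the
# original module). With keep_prob = 0.5, surviving samples are rescaled by
# 1 / 0.5 = 2 so the expected value is preserved; in eval mode (training=False)
# the input passes through unchanged.
#
#     x = torch.ones(4, 3, 8, 8)
#     assert torch.equal(drop_path(x, drop_prob=0.5, training=False), x)
#     out = drop_path(x, drop_prob=0.5, training=True)
#     assert set(out.unique().tolist()) <= {0.0, 2.0}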
class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with a convolutional projection."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group; input shape (batch, channels, height, width)."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtract the input: the block models only the residual of average pooling
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        # Global average pooling over the spatial dimensions before classification
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
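
# Hedged usage sketch (added for illustration, not part of the original file):
# running a randomly initialized classifier on a dummy image. `PoolFormerConfig`
# defaults are assumed; only `num_labels` is set explicitly.
#
#     from transformers import PoolFormerConfig
#     config = PoolFormerConfig(num_labels=10)
#     model = PoolFormerForImageClassification(config).eval()
#     pixel_values = torch.randn(1, 3, 224, 224)
#     with torch.no_grad():
#         logits = model(pixel_values).logits  # shape: (1, 10)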
| 263 | 1 |
import argparse
import torch
from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 578 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )
    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat
    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
| 578 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
_A = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
tokenizer = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
_A = tokenizer(["Making tiny model"], return_tensors="pt")
_A = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
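
# Hedged usage sketch (illustration only): once uploaded, the tiny checkpoint can
# be loaded back like any other hub model.
#
#     from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#     tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
#     model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")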
| 279 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by a majority vote among its k nearest neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
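
def classifier_vectorized(train_data, train_target, classes, point, k=5):
    """
    Vectorized alternative to `classifier`, added for illustration only: the
    per-point loop is replaced by one broadcasted distance computation. Assumes
    the same inputs as `classifier` above.
    """
    dists = np.linalg.norm(np.asarray(train_data) - np.asarray(point), axis=1)
    nearest = np.argsort(dists)[:k]
    result = Counter(np.asarray(train_target)[nearest]).most_common(1)[0][0]
    return classes[result]
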
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 279 | 1 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Returns True if the input string contains every letter of the alphabet."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True  # 97 == ord("a")
        elif char.isupper():
            flag[ord(char) - 65] = True  # 65 == ord("A")
    return all(flag)
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark the three pangram checks above."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
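
# Quick usage demo (added for illustration):
#
#     assert is_pangram("The quick brown fox jumps over the lazy dog")
#     assert not is_pangram("My name is Unknown")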
| 92 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
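
# Sanity-check sketch for the "crash" schedule above (illustration only): t = 0
# maps to pure signal (alpha = 1, sigma = 0) and t = 1 to pure noise, so the
# returned timesteps should increase monotonically from 0 toward 1.
#
#     t = torch.linspace(0, 1, 5)
#     print(get_crash_schedule(t))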
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        new_string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
    string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
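
# Illustrative examples of the key mapping above (added for clarity; derived by
# tracing `rename` by hand, not taken from the original script):
#
#     rename("timestep_embed.weight")
#     # -> "time_proj.weight"
#     rename("net.3.main.1.skip.weight")
#     # -> "down_blocks.1.resnets.0.conv_skip.weight"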
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    main(args)
| 39 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 524 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt2': 1024,
'gpt2-medium': 1024,
'gpt2-large': 1024,
'gpt2-xl': 1024,
'distilgpt2': 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
def lowerCamelCase ( self :Dict , __UpperCamelCase :"Conversation" ):
A = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [self.eos_token_id] )
if len(__UpperCamelCase ) > self.model_max_length:
A = input_ids[-self.model_max_length :]
return input_ids
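
# Hedged usage sketch (illustration only): how the conversation helper above
# assembles ids -- each utterance is encoded without special tokens and
# terminated by the EOS token.
#
#     tok = GPT2TokenizerFast.from_pretrained("gpt2")
#     ids = tok.encode("Hello there", add_special_tokens=False) + [tok.eos_token_id]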
| 524 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"tanreinama/GPTSAN-2.8B-spout_is_uniform": (
"https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
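
# Hedged usage sketch (illustration only): instantiating a small config; the
# `attribute_map` above lets the generic names resolve to GPTSAN-specific ones.
#
#     config = GPTSanJapaneseConfig(d_model=256, num_switch_layers=2, num_ext_layers=0)
#     assert config.hidden_size == config.d_model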
| 440 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Returns the default number of steps recommended for inference."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Only works with a DDIM scheduler, as this method is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t**0.5 + beta_prod_t**0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical Linear intERPolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
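
# Sanity-check sketch for `slerp` (illustration only): interpolating between two
# orthogonal unit vectors stays on the unit sphere.
#
#     x0 = torch.tensor([1.0, 0.0])
#     x1 = torch.tensor([0.0, 1.0])
#     y = AudioDiffusionPipeline.slerp(x0, x1, 0.25)
#     assert abs(torch.norm(y).item() - 1.0) < 1e-6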
| 440 | 1 |
def solution(n = 100):
    # Project Euler 6: difference between the square of the sum and the sum of the squares.
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
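# Worked check (a sketch): for n = 10 the square of the sum is 55**2 = 3025,
# the sum of the squares is 385, and the difference is 2640.
# assert solution(10) == 2640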
if __name__ == "__main__":
print(F'''{solution() = }''') | 193 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        """Computes the expected (height, width) after the aspect-ratio-preserving resize."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item: item[0] )[0]
            expected_width = max(expected_values , key=lambda item: item[1] )[1]
return expected_height, expected_width
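    # Worked example (a sketch): with {"shortest_edge": 18}, an image 40 wide and
    # 30 tall keeps its aspect ratio, so get_expected_values returns (18, 24).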
@require_torch
@require_vision
class DetaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = DetaImageProcessingTester(self )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "do_rescale" ) )
self.assertTrue(hasattr(a , "do_pad" ) )
self.assertTrue(hasattr(a , "size" ) )
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
SCREAMING_SNAKE_CASE : str = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : List[str] = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE : int = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[str] = {"image_id": 3_9769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE : int = DetaImageProcessor()
SCREAMING_SNAKE_CASE : Tuple = image_processing(images=a , annotations=a , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Any = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a ) )
# verify boxes
SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , a )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : str = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a ) )
# verify orig_size
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a ) )
# verify size
SCREAMING_SNAKE_CASE : int = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a ) )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE : Any = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE : Optional[Any] = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE : str = image_processing(images=a , annotations=a , masks_path=a , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE : Dict = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , a )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE : str = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a ) )
# verify boxes
SCREAMING_SNAKE_CASE : int = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , a )
SCREAMING_SNAKE_CASE : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : int = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Dict = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a ) )
# verify masks
SCREAMING_SNAKE_CASE : Tuple = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , a )
# verify orig_size
SCREAMING_SNAKE_CASE : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a ) )
# verify size
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a ) ) | 193 | 1 |
from math import isqrt
def is_prime(number: int ) -> bool:
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution(max_prime: int = 10**6 ) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
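# Worked check (a sketch): consecutive-cube differences run 7, 19, 37, 61, 91, ...
# and 91 = 7 * 13 is the first composite one, so solution(100) == 4.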
if __name__ == "__main__":
print(F"""{solution() = }""")
| 33 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model( self ,config ,input_ids ,head_mask ,token_type_ids ,*args ):
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,token_type_ids=token_type_ids ,head_mask=head_mask )
        result = model(input_ids ,token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model( self ,config ,input_ids ,head_mask ,token_type_ids ,*args ):
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,token_type_ids=token_type_ids ,labels=input_ids )
        self.parent.assertEqual(result.loss.shape ,() )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model( self ,config ,input_ids ,head_mask ,token_type_ids ,*args ):
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,token_type_ids=token_type_ids ,labels=input_ids )
        self.parent.assertEqual(result.loss.shape ,() )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification( self ,config ,input_ids ,head_mask ,token_type_ids ,*args ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        result = model(input_ids ,token_type_ids=token_type_ids ,labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self ,pipeline_test_casse_name ,config_class ,model_architecture ,tokenizer_name ,processor_name ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class( self ,inputs_dict ,model_class ,return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device ,)
                inputs_dict["""input_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""token_type_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""mc_token_ids"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=torch_device ,)
                inputs_dict["""mc_labels"""] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=OpenAIGPTConfig ,n_embd=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_openai_gpt_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_double_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class OPENAIGPTModelLanguageGenerationTest( unittest.TestCase ):
@slow
    def test_lm_generate_openai_gpt( self ):
        model = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4_735, 544]] ,dtype=torch.long ,device=torch_device ) # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids ,do_sample=False )
        self.assertListEqual(output_ids[0].tolist() ,expected_output_ids )
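        # Note (a sketch of why this is stable): with do_sample=False generation is
        # greedy and deterministic, so comparing exact token ids is reliable.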
| 341 | 0 |
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params , i , prefix , layer_name="attention" ):
    '''Returns the KOQV parameters of (self-)attention; does not transpose.'''
    k = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
    o = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
    q = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
    v = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
    return k, o, q, v
def tax_mlp_lookup(params , i , prefix , split_mlp_wi=False ):
    '''Returns the MLP parameters of a layer; does not transpose.'''
    if split_mlp_wi:
        wi_a = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
        wi_b = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
        wi = (wi_a, wi_b)
    else:
        wi = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
    wo = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
    return wi, wo
def tax_layer_norm_lookup(params , i , prefix , layer_name ):
    '''Returns the layer norm parameters of a layer.'''
    return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def convert_tax_to_pytorch(variables: dict , * , num_layers: int , is_encoder_only: bool ):
    '''Converts the parameters from T5X-Flax to Transformers-PyTorch.
    The target keys below follow the standard Hugging Face T5 state-dict layout.'''
    old = traverse_util.flatten_dict(variables["""target"""] )
    old = {"""/""".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = """encoder/layers_0/mlp/wi_0/kernel""" in old
    print("""Split MLP:""" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["""shared.weight"""] = old["""token_embedder/embedding"""]
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , """encoder""" , """pre_attention_layer_norm""" )
        k, o, q, v = tax_attention_lookup(old , i , """encoder""" , """attention""" )
        new[f'''encoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
        new[f'''encoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , """encoder""" , """pre_mlp_layer_norm""" )
        wi, wo = tax_mlp_lookup(old , i , """encoder""" , split_mlp_wi )
        new[f'''encoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
        if split_mlp_wi:
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'''] = wi[0].T
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'''] = wi[1].T
        else:
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi.weight'''] = wi.T
        new[f'''encoder.block.{i}.layer.1.DenseReluDense.wo.weight'''] = wo.T
    new["""encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = old[
        """encoder/relpos_bias/rel_embedding"""
    ].T
    new["""encoder.final_layer_norm.weight"""] = old["""encoder/encoder_norm/scale"""]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , """decoder""" , """pre_self_attention_layer_norm""" )
            k, o, q, v = tax_attention_lookup(old , i , """decoder""" , """self_attention""" )
            new[f'''decoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
            new[f'''decoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , """decoder""" , """pre_cross_attention_layer_norm""" )
            k, o, q, v = tax_attention_lookup(old , i , """decoder""" , """encoder_decoder_attention""" )
            new[f'''decoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.k.weight'''] = k.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.o.weight'''] = o.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.q.weight'''] = q.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.v.weight'''] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , """decoder""" , """pre_mlp_layer_norm""" )
            wi, wo = tax_mlp_lookup(old , i , """decoder""" , split_mlp_wi )
            new[f'''decoder.block.{i}.layer.2.layer_norm.weight'''] = layer_norm
            if split_mlp_wi:
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'''] = wi[0].T
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'''] = wi[1].T
            else:
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi.weight'''] = wi.T
            new[f'''decoder.block.{i}.layer.2.DenseReluDense.wo.weight'''] = wo.T
        new["""decoder.final_layer_norm.weight"""] = old["""decoder/decoder_norm/scale"""]
        new["""decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = old[
            """decoder/relpos_bias/rel_embedding"""
        ].T
    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new["""lm_head.weight"""] = old["""decoder/logits_dense/kernel"""].T
    return new
def make_state_dict(converted_params , is_encoder_only: bool ):
    '''Prepares a state dict for the PyTorch model.'''
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["""encoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["""decoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
        if "lm_head.weight" not in state_dict: # For old 1.0 models.
            print("""Using shared word embeddings as lm_head.""" )
            state_dict["""lm_head.weight"""] = state_dict["""shared.weight"""]
    return state_dict
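# Note: v1.0 checkpoints tie the LM head to the shared embedding matrix (handled
# above), while v1.1 checkpoints export it explicitly as decoder/logits_dense/kernel.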
def load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only ):
    '''Replaces the model's parameters with the converted T5X parameters.'''
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only: bool = False ):
    '''Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint.'''
    config = T5Config.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config )
    else:
        model = T5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("""Done""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
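# Hypothetical invocation sketch (paths assumed):
# python convert_t5x_checkpoint_to_pytorch.py \
#   --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#   --config_file /path/to/config.json \
#   --pytorch_dump_path /path/to/output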
| 571 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image ):
    '''Maps a PIL image to a [-1, 1] float tensor with sides snapped to multiples of 32.'''
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
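# Example (a sketch): a 517x389 PIL image is snapped to 512x384 and returned as a
# float tensor of shape (1, 3, 384, 512) with values in [-1, 1].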
class LDMSuperResolutionPipeline(DiffusionPipeline ):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + U-Net + scheduler)."""
    def __init__( self , vqvae , unet , scheduler , ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , image = None , batch_size = 1 , num_inference_steps = 1_0_0 , eta = 0.0 , generator = None , output_type = "pil" , return_dict = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}''' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["""eta"""] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
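# Hypothetical usage sketch (checkpoint name assumed):
# pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
# upscaled = pipe(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]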
| 571 | 1 |
'''simple docstring'''
import sys
def matrix_chain_order(array ):
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 ,n ):
        for a in range(1 ,n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a ,b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution ,i ,j ):
    if i == j:
        print('A' + str(i ) ,end=' ' )
    else:
        print('(' ,end=' ' )
        print_optimal_solution(optimal_solution ,i ,optimal_solution[i][j] )
        print_optimal_solution(optimal_solution ,optimal_solution[i][j] + 1 ,j )
        print(')' ,end=' ' )
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix ,optimal_solution = matrix_chain_order(array )
    print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution ,1 ,n - 1 )
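# Worked check (the classic CLRS instance): for dimensions
# [30, 35, 15, 5, 10, 20, 25] the minimum cost is 15125 multiplications with
# parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).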
if __name__ == "__main__":
main()
| 42 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
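# Sketch of the effect: importing this package is cheap; the torch/TF/Flax
# submodules registered above are only imported when one of the listed names
# (e.g. ResNetModel) is first accessed on the lazy module.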
| 42 | 1 |
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__( self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , num_labels=3 , ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values, labels
    def create_and_check_model( self , config , pixel_values , labels ):
        model = FlaxBeitModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , pixel_values , labels ):
        model = FlaxBeitForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin , unittest.TestCase):
    all_model_classes = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
    def setUp( self ) -> None:
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
def _snake_case ( self )-> List[str]:
self.config_tester.run_common_tests()
def _snake_case ( self )-> int:
lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ =[*signature.parameters.keys()]
lowerCamelCase_ =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> str:
lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ =self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE )
@jax.jit
def model_jitted(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return model(pixel_values=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
with self.subTest("""JIT Enabled""" ):
lowerCamelCase_ =model_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowerCamelCase_ =model_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self )-> Dict:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self )-> int:
for model_class_name in self.all_model_classes:
lowerCamelCase_ =model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" )
lowerCamelCase_ =model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( ) ->Dict:
"""simple docstring"""
lowerCamelCase_ =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
@cached_property
def _snake_case ( self )-> Union[str, Any]:
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def _snake_case ( self )-> List[str]:
lowerCamelCase_ =FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" )
lowerCamelCase_ =self.default_image_processor
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""np""" ).pixel_values
# prepare bool_masked_pos
lowerCamelCase_ =np.ones((1, 196) , dtype=_SCREAMING_SNAKE_CASE )
# forward pass
lowerCamelCase_ =model(pixel_values=_SCREAMING_SNAKE_CASE , bool_masked_pos=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =outputs.logits
# verify the logits
lowerCamelCase_ =(1, 196, 8192)
self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =np.array(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-2 ) )
@slow
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ =FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" )
lowerCamelCase_ =self.default_image_processor
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
# forward pass
lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =outputs.logits
# verify the logits
lowerCamelCase_ =(1, 1000)
self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =np.array([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] )
self.assertTrue(np.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
lowerCamelCase_ =281
self.assertEqual(logits.argmax(-1 ).item() , _SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self )-> List[str]:
lowerCamelCase_ =FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" )
lowerCamelCase_ =self.default_image_processor
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
# forward pass
lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =outputs.logits
# verify the logits
lowerCamelCase_ =(1, 2_1841)
self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =np.array([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] )
self.assertTrue(np.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
lowerCamelCase_ =2396
self.assertEqual(logits.argmax(-1 ).item() , _SCREAMING_SNAKE_CASE )
| 75 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , type_sequence_label_size=10 , initializer_range=0.0_2 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config( self ):
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config( self ):
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ):
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=37 )
def _snake_case ( self )-> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self )-> Tuple:
return
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ =[*signature.parameters.keys()]
lowerCamelCase_ =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def _snake_case ( self )-> str:
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def _snake_case ( self )-> str:
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _snake_case ( self )-> Optional[Any]:
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _snake_case ( self )-> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _snake_case ( self )-> List[Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self )-> str:
pass
def _snake_case ( self )-> Optional[int]:
def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCamelCase_ =model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCamelCase_ =model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ =self.model_tester.num_stages
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ =True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ =True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Union[str, Any]:
lowerCamelCase_ , lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ =_config_zero_init(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =_config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
lowerCamelCase_ =model_class(config=_SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def _snake_case ( self )-> Dict:
pass
@slow
def _snake_case ( self )-> Tuple:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( ) ->Tuple:
"""simple docstring"""
lowerCamelCase_ =hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
lowerCamelCase_ =Image.open(_A ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
def _snake_case ( self )-> List[Any]:
lowerCamelCase_ =AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
def _snake_case ( self )-> int:
lowerCamelCase_ =AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
lowerCamelCase_ =UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowerCamelCase_ =model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 75 | 1 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Dict =logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer ):
    """Constructs a ByT5 tokenizer, which operates on raw UTF-8 bytes."""
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , lowerCAmelCase__="</s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__=1_2_5 , lowerCAmelCase__=None , **lowerCAmelCase__ , ):
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE_ : Any = [F'''<extra_id_{i}>''' for i in range(lowerCAmelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
SCREAMING_SNAKE_CASE_ : str = len(set(filter(lambda lowerCAmelCase__ : bool('extra_id' in str(lowerCAmelCase__ ) ) , lowerCAmelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
SCREAMING_SNAKE_CASE_ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
super().__init__(
eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , extra_ids=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = extra_ids
SCREAMING_SNAKE_CASE_ : Dict = 2**8 # utf is 8 bits
# define special tokens dict
SCREAMING_SNAKE_CASE_ : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
SCREAMING_SNAKE_CASE_ : int = len(self.special_tokens_encoder )
SCREAMING_SNAKE_CASE_ : str = len(lowerCAmelCase__ )
for i, token in enumerate(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.vocab_size + i - n
SCREAMING_SNAKE_CASE_ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCAmelCase__ )) + [1]
return ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
if len(lowerCAmelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self._add_eos_if_not_present(lowerCAmelCase__ )
if token_ids_a is None:
return token_ids_a
else:
SCREAMING_SNAKE_CASE_ : Dict = self._add_eos_if_not_present(lowerCAmelCase__ )
return token_ids_a + token_ids_a
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [chr(lowerCAmelCase__ ) for i in text.encode('utf-8' )]
return tokens
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
if token in self.special_tokens_encoder:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
SCREAMING_SNAKE_CASE_ : str = self.added_tokens_encoder[token]
elif len(lowerCAmelCase__ ) != 1:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.unk_token_id
else:
SCREAMING_SNAKE_CASE_ : List[Any] = ord(lowerCAmelCase__ ) + self._num_special_tokens
return token_id
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
if index in self.special_tokens_decoder:
SCREAMING_SNAKE_CASE_ : Tuple = self.special_tokens_decoder[index]
else:
SCREAMING_SNAKE_CASE_ : List[str] = chr(index - self._num_special_tokens )
return token
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = b''
for token in tokens:
if token in self.special_tokens_decoder:
SCREAMING_SNAKE_CASE_ : List[str] = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.added_tokens_decoder:
SCREAMING_SNAKE_CASE_ : Any = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.special_tokens_encoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = token.encode('utf-8' )
elif token in self.added_tokens_encoder:
SCREAMING_SNAKE_CASE_ : List[str] = token.encode('utf-8' )
else:
SCREAMING_SNAKE_CASE_ : int = bytes([ord(lowerCAmelCase__ )] )
bstring += tok_string
SCREAMING_SNAKE_CASE_ : int = bstring.decode('utf-8' , errors='ignore' )
return string
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
"""simple docstring"""
return ()
| 101 |
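The tokenizer above maps every UTF-8 byte to one id, offset past the special-token slots. A toy sketch of the same encode/decode idea, independent of the tokenizer class; the offset of 3 mirrors the pad/eos/unk slots assumed above:

NUM_SPECIAL = 3  # pad=0, eos=1, unk=2, mirroring the encoder above


def byte_encode(text):
    # one id per UTF-8 byte, shifted past the special-token ids
    return [b + NUM_SPECIAL for b in text.encode("utf-8")]


def byte_decode(ids):
    raw = bytes(i - NUM_SPECIAL for i in ids if i >= NUM_SPECIAL)
    return raw.decode("utf-8", errors="ignore")


assert byte_decode(byte_encode("héllo")) == "héllo"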
import re


def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
| 412 | 0 |
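A few spot checks of the pattern above, covering an accepted "+91" prefix, a bare 10-digit number, and a rejected short input (assumes the validator above is in scope):

assert indian_phone_validator("+918827897895")
assert indian_phone_validator("9876543210")
assert not indian_phone_validator("12345")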
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # keeps the running value of C(n, k)
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
| 574 |
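A worked instance of the counting above: with n = 3 nodes, C(6, 3) = 20, so there are 20 // 4 = 5 BST shapes, and 5 * 3! = 30 labeled binary trees. A quick cross-check against the standard library:

from math import comb, factorial

def catalan(n: int) -> int:
    return comb(2 * n, n) // (n + 1)

# 3 nodes: 5 BST shapes, 5 * 3! = 30 distinct labeled binary trees
assert catalan(3) == 5
assert catalan(3) * factorial(3) == 30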
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 574 | 1 |
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix: returns (lower, upper)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 672 |
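A quick way to sanity-check the decomposition above is to confirm that L is unit lower triangular, U is upper triangular, and L @ U reproduces the input (assumes lower_upper_decomposition from the snippet above is in scope; the test matrix is illustrative):

import numpy as np

table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(table)
assert np.allclose(lower @ upper, table)
assert np.allclose(np.diag(lower), 1.0)   # unit diagonal on L
assert np.allclose(upper, np.triu(upper))  # U is upper triangular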
from __future__ import annotations

import unittest

from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel


@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)


def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 146 | 0 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 399 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 399 | 1 |
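The `_import_structure` dance above is the standard lazy-import pattern: heavy submodules are only imported when one of their symbols is first accessed. A minimal sketch of the same idea without the transformers `_LazyModule` helper; the module and symbol names here are illustrative, not from the snippet:

import importlib
from types import ModuleType


class _Lazy(ModuleType):
    # resolve symbols to their submodules only on first attribute access
    def __init__(self, name, structure):
        super().__init__(name)
        self._symbol_to_module = {sym: mod for mod, syms in structure.items() for sym in syms}

    def __getattr__(self, item):
        if item not in self._symbol_to_module:
            raise AttributeError(item)
        module = importlib.import_module("." + self._symbol_to_module[item], self.__name__)
        value = getattr(module, item)
        setattr(self, item, value)  # cache so later lookups skip __getattr__
        return value


# usage inside a package __init__.py (illustrative):
# import sys; sys.modules[__name__] = _Lazy(__name__, {"modeling": ["MyModel"]})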
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    # plain recursion: count ordered sequences of array elements summing to target
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    # top-down recursion with memoization
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    # bottom-up tabulation
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 285 |
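A worked instance of the three equivalent counters above: with coins [1, 2, 5] and target 5, the ordered ways are 1+1+1+1+1, the four arrangements of 1+1+1+2, the three arrangements of 1+2+2, and 5 itself, for 9 in total. A brute-force cross-check:

from itertools import product

coins, target = [1, 2, 5], 5
count = 0
for length in range(1, target + 1):
    count += sum(1 for seq in product(coins, repeat=length) if sum(seq) == target)
assert count == 9  # matches combination_sum_iv(len(coins), coins, target)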
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 686 | 0 |
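The basic layer above implements the standard residual pattern: a small convolutional body plus an identity (or 1x1 projection) shortcut, summed before the activation. A self-contained sketch of that pattern in plain PyTorch, with illustrative dimensions:

import torch
from torch import nn

class TinyResidualBlock(nn.Module):
    # minimal sketch of the body/shortcut/add/activate pattern above
    def __init__(self, in_ch: int, out_ch: int, stride: int = 1):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, stride, 1, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(),
            nn.Conv2d(out_ch, out_ch, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_ch),
        )
        self.shortcut = (
            nn.Identity()
            if in_ch == out_ch and stride == 1
            else nn.Sequential(nn.Conv2d(in_ch, out_ch, 1, stride, bias=False), nn.BatchNorm2d(out_ch))
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.relu(self.body(x) + self.shortcut(x))

out = TinyResidualBlock(3, 8, stride=2)(torch.randn(1, 3, 32, 32))  # -> (1, 8, 16, 16)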
"""simple docstring"""
import datasets
SCREAMING_SNAKE_CASE_ = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 579 |
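The metric above reduces to element-wise label agreement. A two-line check of the same computation on numpy arrays, which is what the metric's "numpy" format implies:

import numpy as np

preds, refs = np.array([0, 1, 2, 1]), np.array([0, 1, 1, 1])
print((preds == refs).mean())  # 0.75, identical to simple_accuracy(preds, refs)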
"""simple docstring"""
def A__ ( A__ = 1000 ) -> int:
'''simple docstring'''
_UpperCAmelCase = -1
_UpperCAmelCase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
_UpperCAmelCase = (n * n - 2 * a * n) // (2 * n - 2 * a)
_UpperCAmelCase = n - a - b
if c * c == (a * a + b * b):
_UpperCAmelCase = a * b * c
if candidate >= product:
_UpperCAmelCase = candidate
return product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 579 | 1 |
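For the default n = 1000 above, the search lands on the classic triplet (200, 375, 425): the sides sum to 1000 and satisfy the Pythagorean relation, giving the product 31875000. A direct check:

a, b, c = 200, 375, 425
assert a + b + c == 1_000 and a * a + b * b == c * c
print(a * b * c)  # 31875000, the value solution() returns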
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 72 |
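The converter above must call `apply_weight_norm()` before copying any `weight_g`/`weight_v` tensors, because those parameters only exist while weight normalization is active, and `remove_weight_norm()` folds them back into a single weight. A toy illustration of that lifecycle with a bare conv layer (the values copied in are stand-ins for checkpoint tensors):

import torch
from torch import nn

conv = nn.Conv1d(4, 4, 3)
conv = nn.utils.weight_norm(conv)  # splits weight into weight_g and weight_v
assert hasattr(conv, "weight_g") and hasattr(conv, "weight_v")
with torch.no_grad():
    conv.weight_g.copy_(torch.ones_like(conv.weight_g))  # stand-in for checkpoint values
nn.utils.remove_weight_norm(conv)  # folds g and v back into a single weight
assert not hasattr(conv, "weight_g")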
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
        do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True,
        image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 225 | 0 |
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count the triangle words in words.txt (Project Euler 42)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
| 294 | 0 |
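A worked instance of the scoring rule above: "SKY" maps to 19 + 11 + 25 = 55, which is the 10th triangular number, so it counts as a triangle word:

word = "SKY"
value = sum(ord(ch) - 64 for ch in word)  # A=1, B=2, ...
triangles = {n * (n + 1) // 2 for n in range(1, 101)}
assert value == 55 and value in triangles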
def solution(length: int = 50) -> int:
    """Count ways to fill a row of the given length with tiles of length 2-4 (gaps allowed)."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
| 466 |
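Small cases make the recurrence above concrete: a length-3 row can be left empty, hold a 2-tile at offset 0 or 1, or hold one 3-tile, giving 4 ways; mixed placements first appear at length 5. A quick check, assuming the solution function above is in scope:

assert solution(3) == 4 and solution(5) == 15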
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 466 | 1 |
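A minimal stand-alone equivalent of the tool above, using the same processor/model pair directly; the audio array here is a placeholder (one second of silence), and real input should be 16 kHz mono:

import torch
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

audio = torch.zeros(16_000)  # placeholder: one second of silence at 16 kHz
features = processor(audio.numpy(), sampling_rate=16_000, return_tensors="pt").input_features
ids = model.generate(inputs=features)
print(processor.batch_decode(ids, skip_special_tokens=True)[0])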
"""simple docstring"""
from __future__ import annotations
import time
_UpperCamelCase : Any = list[tuple[int, int]]
_UpperCamelCase : Dict = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_UpperCamelCase : List[str] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class a :
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowercase = pos_x
lowercase = pos_y
lowercase = (pos_y, pos_x)
lowercase = goal_x
lowercase = goal_y
lowercase = parent
class a :
def __init__( self , _lowerCamelCase , _lowerCamelCase ):
lowercase = Node(start[1] , start[0] , goal[1] , goal[0] , UpperCamelCase__ )
lowercase = Node(goal[1] , goal[0] , goal[1] , goal[0] , UpperCamelCase__ )
lowercase = [self.start]
lowercase = False
def UpperCamelCase_ ( self ):
while self.node_queue:
lowercase = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
lowercase = True
return self.retrace_path(UpperCamelCase__ )
lowercase = self.get_successors(UpperCamelCase__ )
for node in successors:
self.node_queue.append(UpperCamelCase__ )
if not self.reached:
return [self.start.pos]
return None
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = []
for action in delta:
lowercase = parent.pos_x + action[1]
lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCamelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(UpperCamelCase__ , UpperCamelCase__ , self.target.pos_y , self.target.pos_x , UpperCamelCase__ ) )
return successors
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase = node
lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowercase = current_node.parent
path.reverse()
return path
class a :
def __init__( self , _lowerCamelCase , _lowerCamelCase ):
lowercase = BreadthFirstSearch(UpperCamelCase__ , UpperCamelCase__ )
lowercase = BreadthFirstSearch(UpperCamelCase__ , UpperCamelCase__ )
lowercase = False
def UpperCamelCase_ ( self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
lowercase = self.fwd_bfs.node_queue.pop(0 )
lowercase = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
lowercase = True
return self.retrace_bidirectional_path(
UpperCamelCase__ , UpperCamelCase__ )
lowercase = current_bwd_node
lowercase = current_fwd_node
lowercase = {
self.fwd_bfs: self.fwd_bfs.get_successors(UpperCamelCase__ ),
self.bwd_bfs: self.bwd_bfs.get_successors(UpperCamelCase__ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(UpperCamelCase__ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
lowercase = self.fwd_bfs.retrace_path(UpperCamelCase__ )
lowercase = self.bwd_bfs.retrace_path(UpperCamelCase__ )
bwd_path.pop()
bwd_path.reverse()
lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
_UpperCamelCase : Optional[int] = (0, 0)
_UpperCamelCase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_UpperCamelCase : int = time.time()
_UpperCamelCase : Any = BreadthFirstSearch(init, goal)
_UpperCamelCase : List[Any] = bfs.search()
_UpperCamelCase : Dict = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
_UpperCamelCase : Any = time.time()
_UpperCamelCase : Any = BidirectionalBreadthFirstSearch(init, goal)
_UpperCamelCase : str = bd_bfs.search()
_UpperCamelCase : List[str] = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
| 711 |
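A quick agreement check between the two searchers above on the module's demo grid; it assumes the classes and `grid` from the snippet above are in scope, and that the goal is reachable (it is, for this grid):

start, goal = (0, 0), (len(grid) - 1, len(grid[0]) - 1)
path_uni = BreadthFirstSearch(start, goal).search()
path_bi = BidirectionalBreadthFirstSearch(start, goal).search()
assert path_uni[0] == path_bi[0] == start
assert path_uni[-1] == path_bi[-1] == goal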
"""simple docstring"""
from PIL import Image
def _SCREAMING_SNAKE_CASE ( __snake_case : Image ):
'''simple docstring'''
lowercase , lowercase = image.size
lowercase = 0
lowercase = image.load()
for i in range(__snake_case ):
for j in range(__snake_case ):
lowercase = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(__snake_case ):
for i in range(__snake_case ):
lowercase = 2_55 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
_UpperCamelCase : Any = mean_threshold(Image.open('path_to_image').convert('L'))
image.save('output_image_path')
| 134 | 0 |
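The same thresholding rule, expressed on a plain array so the arithmetic is visible (the integer mean matches the `//=` in the PIL version):

import numpy as np

img = np.array([[10, 200], [30, 180]])
mean = img.sum() // img.size  # integer mean, as in the PIL version
binary = np.where(img > mean, 255, 0)
print(mean, binary.tolist())  # 105 [[0, 255], [0, 255]]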
from __future__ import annotations
import math
__a = '2020.9.26'
__a = 'xcodz-dot, cclaus, dhruvmanila'
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if not all(isinstance(_lowercase , (float, int) ) for val in locals().values() ):
UpperCAmelCase_ : Optional[int] = f'''Input values must either be float or int: {list(locals().values() )}'''
raise TypeError(_lowercase )
UpperCAmelCase_ : Tuple = ((x * distance) / (z + distance)) * scale
UpperCAmelCase_ : str = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def rotate(
    x: float, y: float, z: float, axis: str, angle: float
) -> tuple[float, float, float]:
    """Rotate a 3D point around the x, y or z axis by the given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
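

# A small composition sketch (added for illustration, not part of the original
# module): rotate a point about the z axis, then project it to 2D.
def _demo_rotate_then_project() -> tuple[float, float]:
    x, y, z = rotate(1.0, 2.0, 3.0, "z", 90.0)
    return convert_to_2d(x, y, z, 10.0, 10.0)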
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 30 |
"""Mask2Former model configuration"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Mask2Former model."""

    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a `Mask2FormerConfig` from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
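

# A minimal usage sketch (added for illustration; assumes the surrounding
# `transformers` package is importable):
#
#     config = Mask2FormerConfig(num_queries=50)
#     config.backbone_config.model_type   # -> "swin" (default backbone)
#     config.to_dict()["model_type"]      # -> "mask2former"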
| 603 | 0 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
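        # Note (added for clarity): the `unsqueeze(1).expand(-1, num_choices, -1)`
        # calls above replicate each sequence once per answer choice, turning
        # (batch, seq_len) tensors into the (batch, num_choices, seq_len) layout
        # that multiple-choice heads expect.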
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 134 |
"""simple docstring"""
import math
import unittest
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
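

# Why 6k ± 1 suffices (added explanatory note): any integer can be written as
# 6k + r with r in {0, 1, 2, 3, 4, 5}; when r is 0, 2, 3 or 4 the number is
# divisible by 2 or 3, so every prime greater than 3 has the form 6k ± 1.
# That is why the loop above only tries the candidates i and i + 2 for
# i = 5, 11, 17, ... up to sqrt(number).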
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 134 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
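

# A quick manual check that mirrors the test above (illustrative only):
#
#     from transformers import pipeline
#     fill = pipeline("fill-mask", model="uw-madison/nystromformer-512")
#     fill("the [MASK] of Belgium is Brussels")[0]["token_str"]  # -> "capital"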
| 610 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 671 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
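    # Note (added for clarity): Vivit pixel values are 5-D —
    # (batch_size, num_frames, num_channels, height, width) — which is why every
    # shape assertion in the tests below keeps `num_frames` between the batch
    # and channel dimensions.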
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 641 |
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
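

# Example CLI invocation (illustrative file names, dispatched via python-fire):
#
#     python rouge_cli.py preds.txt targets.txt --save_path rouge.json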
if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
| 641 | 1 |
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark the end of a complete word

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
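

# Worked example (added for illustration): with the words inserted above,
# autocomplete_using_trie("de") yields
# ('depart ', 'detergent ', 'deer ', 'deal ') — the trailing space comes from
# the END marker that `_elements` maps to " ".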
def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 171 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()
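                # How this differs from gradient accumulation (added note): each
                # process keeps taking optimizer steps locally, and
                # `local_sgd.step()` only synchronizes (averages) the model
                # parameters across workers every `local_sgd_steps` steps,
                # instead of all-reducing gradients on every backward pass.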
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 171 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
@staticmethod
@abstractmethod
def lowercase__ ( snake_case__ ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def lowercase__ ( self ):
"""simple docstring"""
raise NotImplementedError()
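

# A minimal concrete subclass (illustrative, not part of this file):
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             sub = parser.add_parser("hello")
#             sub.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello")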
| 681 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 681 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCamelCase__ : Optional[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
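# A hedged usage sketch (not part of this module): datasets' test_patching.py
# exercises patch_submodule() against the attributes above, roughly like this.
# `_mock` is an illustrative stand-in object, not something defined here:
#
#   from datasets.utils.patching import patch_submodule
#   import _test_patching
#
#   _mock = object()
#   with patch_submodule(_test_patching, "os.path.join", _mock):
#       assert _test_patching.os.path.join is _mock
#       assert _test_patching.join is _mock  # the `from os.path import join` alias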
| 12 |
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    '''simple docstring'''
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
    '''simple docstring'''
class UnexpectedDownloadedFile(ChecksumVerificationException):
    '''simple docstring'''
class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    '''simple docstring'''
class NonMatchingChecksumError(ChecksumVerificationException):
    '''simple docstring'''
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums." )
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f'''Checksums didn\'t match{for_verification_name}:\n'''
            f'''{bad_urls}\n'''
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name )
class SplitsVerificationException(Exception):
    '''simple docstring'''
class UnexpectedSplits(SplitsVerificationException):
    '''simple docstring'''
class ExpectedMoreSplits(SplitsVerificationException):
    '''simple docstring'''
class NonMatchingSplitsSizesError(SplitsVerificationException):
    '''simple docstring'''
def verify_splits(expected_splits, recorded_splits) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes." )
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully." )
def get_size_checksum_dict(path, record_checksum = True) -> dict:
    if record_checksum:
        m = sha256()
        with open(path, "rb" ) as f:
            for chunk in iter(lambda: f.read(1 << 2_0 ) , b"" ):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
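# A minimal usage sketch for the verification helpers above (the checksum dicts
# are hypothetical, not real download records):
if __name__ == "__main__":
    expected = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
    recorded = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
    verify_checksums(expected, recorded)  # logs "All the checksums matched successfully"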
| 118 | 0 |
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        msg = F'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
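    # A quick sanity sketch: 5 and 7 are twin primes, while 7's candidate twin
    # 9 = 3 * 3 is composite, so the function falls back to -1.
    assert twin_prime(5) == 7
    assert twin_prime(7) == -1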
| 393 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
_DESCRIPTION = "\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_KWARGS_DESCRIPTION = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair as given in the references (see below)\n        - 'prediction_text': list of possible texts for the answer, as a list of strings\n          depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair (see above),\n        - 'answers': a Dict in the CUAD dataset format\n            {\n                'text': list of possible texts for the answer, as a list of strings\n                'answer_start': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    'exact_match': Exact match (the normalized answer exactly match the gold answer)\n    'f1': The F-score of predicted tokens versus the gold answer\n    'aupr': Area Under the Precision-Recall curve\n    'prec_at_80_recall': Precision at 80% recall\n    'prec_at_90_recall': Precision at 90% recall\nExamples:\n    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> cuad_metric = datasets.load_metric(\"cuad\")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references) -> Any:
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 393 | 1 |
'''simple docstring'''
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12) )
if __name__ == "__main__":
print(f'''{solution() = }''')
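# An equivalent formulation of the same 13-digit sliding-window maximum using
# math.prod over integer digits (a sketch, interchangeable with the reduce-based
# version above):
from math import prod
def solution_prod(n: str = N) -> int:
    digits = [int(c) for c in n]
    return max(prod(digits[i : i + 13]) for i in range(len(digits) - 12))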
| 13 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
__magic_name__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering" , model=model , tokenizer=tokenizer , image_processor=processor )
        image = INVOICE_URL
        # second argument of apply_tesseract (the language) is None in the upstream test
        word_boxes = list(zip(*apply_tesseract(load_image(image) , None , "" ) ) )
        question = "What is the placebo?"
        examples = [
{
"image": load_image(lowerCAmelCase_ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples , top_k=2 )
        self.assertEqual(
            outputs , [
[
{"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ ), "start": ANY(lowerCAmelCase_ ), "end": ANY(lowerCAmelCase_ )},
{"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ ), "start": ANY(lowerCAmelCase_ ), "end": ANY(lowerCAmelCase_ )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0_0_0_1, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0_0_0_1, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        outputs = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(outputs , [] )
        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image , question=question , words=words , boxes=boxes , top_k=2 )
        self.assertEqual(outputs , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
UpperCAmelCase_ : Dict = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
UpperCAmelCase_ : Optional[Any] = INVOICE_URL
UpperCAmelCase_ : Dict = "What is the invoice number?"
UpperCAmelCase_ : int = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : int = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : Any = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
UpperCAmelCase_ : Tuple = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
UpperCAmelCase_ : Tuple = INVOICE_URL
UpperCAmelCase_ : Any = "What is the invoice number?"
UpperCAmelCase_ : str = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : Optional[int] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : str = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase_ )
UpperCAmelCase_ : str = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase_ , revision="3dc6de3" , )
UpperCAmelCase_ : Any = INVOICE_URL
UpperCAmelCase_ : List[str] = "What is the invoice number?"
UpperCAmelCase_ : str = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
UpperCAmelCase_ : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
UpperCAmelCase_ : Union[str, Any] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
UpperCAmelCase_ : Dict = list(zip(*apply_tesseract(load_image(lowerCAmelCase_ ) , lowerCAmelCase_ , "" ) ) )
# This model should also work if `image` is set to None
UpperCAmelCase_ : List[str] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase_ )
UpperCAmelCase_ : str = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase_ , revision="3dc6de3" , max_seq_len=50 , )
UpperCAmelCase_ : List[Any] = INVOICE_URL
UpperCAmelCase_ : Optional[int] = "What is the invoice number?"
UpperCAmelCase_ : int = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
] , )
UpperCAmelCase_ : Tuple = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
UpperCAmelCase_ : Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase_ ) , lowerCAmelCase_ , "" ) ) )
# This model should also work if `image` is set to None
UpperCAmelCase_ : Dict = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
    def test_large_model_pt_donut(self):
UpperCAmelCase_ : List[Any] = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
UpperCAmelCase_ : Optional[int] = INVOICE_URL
UpperCAmelCase_ : int = "What is the invoice number?"
UpperCAmelCase_ : List[str] = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
    def test_small_model_tf(self):
pass
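# For reference, a minimal (hedged) usage sketch of the pipeline exercised above;
# the checkpoint is one of the slow-test models, so this needs network access:
#
#   from transformers import pipeline
#   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#   dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)
#   # -> [{"score": ..., "answer": "us-001", "start": 16, "end": 16}]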
| 95 | 0 |
"""simple docstring"""
import os
SYMBOLS = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def parse_roman_numerals(numerals: str) -> int:
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    '''simple docstring'''
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
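# Round-trip sketch for the two helpers above: a non-minimal spelling parses to
# the value whose regenerated form is the canonical (shortest) numeral.
assert parse_roman_numerals("XIIII") == 14
assert generate_roman_numerals(14) == "XIV"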
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    '''simple docstring'''
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 386 |
"""simple docstring"""
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    '''simple docstring'''
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
_lowerCamelCase : Optional[int] = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_lowerCamelCase : Optional[Any] = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : Optional[int] = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
_lowerCamelCase : Tuple = F"""layers_{str(_lowerCamelCase )}"""
# Self-Attention
_lowerCamelCase : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
_lowerCamelCase : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
_lowerCamelCase : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
_lowerCamelCase : int = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : Optional[int] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
_lowerCamelCase : Any = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
_lowerCamelCase : Any = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_lowerCamelCase : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_lowerCamelCase : List[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
_lowerCamelCase : Optional[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_lowerCamelCase : List[str] = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_lowerCamelCase : Tuple = flax_model.params["encoder"]["block"][str(_lowerCamelCase )]["layer"]
_lowerCamelCase : int = tax_attention_key
_lowerCamelCase : Union[str, Any] = tax_attention_out
_lowerCamelCase : str = tax_attention_query
_lowerCamelCase : Dict = tax_attention_value
_lowerCamelCase : str = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : Union[str, Any] = tax_global_layer_norm
if split_mlp_wi:
_lowerCamelCase : Optional[Any] = tax_mlp_wi_a
_lowerCamelCase : int = tax_mlp_wi_a
else:
_lowerCamelCase : str = tax_mlp_wi
_lowerCamelCase : Optional[int] = tax_mlp_wo
_lowerCamelCase : List[str] = tax_mlp_layer_norm
_lowerCamelCase : Tuple = flax_model_encoder_layer_block
# Only for layer 0:
_lowerCamelCase : Optional[int] = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
_lowerCamelCase : int = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : int = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
_lowerCamelCase : List[str] = tax_encoder_global_rel_embedding
# Assigning
_lowerCamelCase : List[str] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
_lowerCamelCase : int = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_lowerCamelCase : str = F"""layers_{str(_lowerCamelCase )}"""
# Self-Attention
_lowerCamelCase : Optional[int] = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
_lowerCamelCase : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
_lowerCamelCase : Dict = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
_lowerCamelCase : Any = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
_lowerCamelCase : Tuple = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
_lowerCamelCase : Optional[int] = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
_lowerCamelCase : List[str] = tax_enc_dec_attention_module["key"]["kernel"]
_lowerCamelCase : Tuple = tax_enc_dec_attention_module["out"]["kernel"]
_lowerCamelCase : Union[str, Any] = tax_enc_dec_attention_module["query"]["kernel"]
_lowerCamelCase : Any = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
_lowerCamelCase : int = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
_lowerCamelCase : Optional[int] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_lowerCamelCase : List[str] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_lowerCamelCase : str = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
_lowerCamelCase : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_lowerCamelCase : Tuple = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_lowerCamelCase : str = flax_model.params["decoder"]["block"][str(_lowerCamelCase )]["layer"]
_lowerCamelCase : Tuple = tax_attention_key
_lowerCamelCase : List[str] = tax_attention_out
_lowerCamelCase : Union[str, Any] = tax_attention_query
_lowerCamelCase : Optional[int] = tax_attention_value
_lowerCamelCase : Optional[Any] = tax_pre_attention_layer_norm
_lowerCamelCase : Tuple = tax_enc_dec_attention_key
_lowerCamelCase : List[str] = tax_enc_dec_attention_out
_lowerCamelCase : Tuple = tax_enc_dec_attention_query
_lowerCamelCase : Tuple = tax_enc_dec_attention_value
_lowerCamelCase : Optional[Any] = tax_cross_layer_norm
if split_mlp_wi:
_lowerCamelCase : List[Any] = tax_mlp_wi_a
_lowerCamelCase : List[Any] = tax_mlp_wi_a
else:
_lowerCamelCase : Dict = tax_mlp_wi
_lowerCamelCase : Union[str, Any] = tax_mlp_wo
        _lowerCamelCase : Dict = tax_mlp_layer_norm
_lowerCamelCase : Optional[int] = flax_model_decoder_layer_block
# Decoder Normalization
_lowerCamelCase : Tuple = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    _lowerCamelCase : Union[str, Any] = tax_decoder_norm
# Only for layer 0:
_lowerCamelCase : int = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
_lowerCamelCase : List[Any] = tax_decoder_rel_embedding
# Token Embeddings
_lowerCamelCase : Union[str, Any] = tax_model["target"]["token_embedder"]["embedding"]
    _lowerCamelCase : Any = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_lowerCamelCase : Tuple = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!" )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
_lowerCAmelCase : int = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 386 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file, """w""", encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = """adapt react readapt apt"""
        output_text = """adapt react readapt apt"""
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = """adapt react readapt apt"""
        bpe_tokens = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 574 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet(self):
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNet2DModel(
            sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
        return model
@property
    def dummy_unet_condition(self):
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=1_0 , )
        return model
@property
    def dummy_vqvae_and_unet(self):
        """simple docstring"""
        torch.manual_seed(0 )
        vqvae = AutoencoderKL(
            sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
        unet = UNet2DModel(
            sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
        return vqvae, unet
@slow
    def test_audio_diffusion(self):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
A__ = DDPMScheduler()
A__ = AudioDiffusionPipeline(vqvae=lowerCamelCase__ , unet=self.dummy_unet , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(4_2 )
A__ = pipe(generator=lowerCamelCase__ , steps=4 )
A__ = output.audios[0]
A__ = output.images[0]
A__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(4_2 )
A__ = pipe(generator=lowerCamelCase__ , steps=4 , return_dict=lowerCamelCase__ )
A__ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
A__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
A__ = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:1_0]
A__ = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
A__ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
A__ = DDIMScheduler()
A__ = self.dummy_vqvae_and_unet
A__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
np.random.seed(0 )
A__ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
A__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(4_2 )
A__ = pipe(raw_audio=lowerCamelCase__ , generator=lowerCamelCase__ , start_step=5 , steps=1_0 )
A__ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
A__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
A__ = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
A__ = self.dummy_unet_condition
A__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowerCamelCase__ , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
np.random.seed(0 )
A__ = torch.rand((1, 1, 1_0) )
A__ = pipe(generator=lowerCamelCase__ , encoding=lowerCamelCase__ )
A__ = output.images[0]
A__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
A__ = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion(self):
"""simple docstring"""
A__ = torch_device
A__ = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
A__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(4_2 )
A__ = pipe(generator=lowerCamelCase__ )
A__ = output.audios[0]
A__ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
A__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
A__ = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
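# A hedged usage sketch of the pipeline exercised above (requires the pretrained
# checkpoint referenced in the slow test and a working torch install):
#
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
#   output = pipe(generator=torch.Generator().manual_seed(42))
#   image = output.images[0]   # mel spectrogram rendered as a PIL image
#   audio = output.audios[0]   # waveform reconstructed from that spectrogram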
| 574 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Any= {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int= [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_a : Optional[Any]= _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
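# The `_LazyModule` indirection defers importing the torch-backed submodule until
# an attribute is first accessed; a short illustrative sketch:
#   import transformers.models.wavlm as wavlm  # cheap: nothing heavy imported yet
#   config = wavlm.WavLMConfig()               # config-only access
#   model = wavlm.WavLMModel(config)           # first model access pulls in the torch code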
| 712 |
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    '''simple docstring'''
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_00_00_00) -> int:
    '''simple docstring'''
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes) ):
        for j in range(i + length , len(primes) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
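    # Sanity sketch: below a ceiling of 100 the longest chain of consecutive
    # primes with a prime sum is 2 + 3 + 5 + 7 + 11 + 13 = 41.
    assert solution(100) == 41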
| 192 | 0 |
from ..utils import DummyObject, requires_backends
class A ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : str , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase__ (cls : str , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase__ (cls : Tuple , *_UpperCAmelCase : int , **_UpperCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class A ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Optional[int] , *_UpperCAmelCase : int , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase__ (cls : Optional[Any] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase__ (cls : str , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Any ) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class A ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Any , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Tuple ) -> int:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase__ (cls : Union[str, Any] , *_UpperCAmelCase : str , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase__ (cls : Any , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class A ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Dict , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase__ (cls : List[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase__ (cls : Optional[int] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class A ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : List[Any] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase__ (cls : Union[str, Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase__ (cls : Optional[int] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class A ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']
def __init__(self : Optional[int] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase__ (cls : Union[str, Any] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def lowerCamelCase__ (cls : Tuple , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
| 15 |
'''simple docstring'''
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent) -> bool:
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink) -> list:
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
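# For this classic flow network the maximum flow from node 0 to node 5 is 23; the
# saturated forward edges reported by mincut() are expected to be
# [(1, 3), (4, 3), (4, 5)] (a sketch; run the module to confirm).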
| 495 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 703 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2
    @register_to_config
    def __init__( self , sigma_min = 0.0_2 , sigma_max = 1_00 , s_noise = 1.0_0_7 , s_churn = 80 , s_min = 0.0_5 , s_max = 50 , ):
        '''simple docstring'''
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input( self , sample , timestep = None):
        '''simple docstring'''
        return sample
    def set_timesteps( self , num_inference_steps , device = None):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device)
    def add_noise_to_input( self , sample , sigma , generator = None):
        '''simple docstring'''
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step( self , model_output , sigma_hat , sigma_prev , sample_hat , return_dict = True , ):
        '''simple docstring'''
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample)
    def step_correct( self , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , ):
        '''simple docstring'''
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample)
    def add_noise( self , original_samples , noise , timesteps):
        '''simple docstring'''
        raise NotImplementedError()
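# A condensed denoising-loop sketch for this scheduler, mirroring diffusers'
# KarrasVePipeline (`model` is assumed to be a UNet2DModel-style noise predictor;
# the second-order `step_correct` pass is omitted for brevity):
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample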
| 495 | 0 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = 'hidden_states'
@property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
    def input_shape(self):
return (4, 8)
@property
    def output_shape(self):
return (4, 8)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 4,
'''num_layers''': 2,
'''embedding_dim''': 8,
'''num_embeddings''': 7,
'''additional_embeddings''': 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            '''hf-internal-testing/prior-dummy''' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]
        assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['''hidden_states''', '''timestep''']
        self.assertListEqual(arg_names[:2] , expected_arg_names )
    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
        model = model.to(torch_device )
        if hasattr(model , '''set_default_attn_processor''' ):
            model.set_default_attn_processor()
        input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input )[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2 ) )
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed )
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' )
        model.to(torch_device )
        input = self.get_dummy_seed_input(seed=seed )
        with torch.no_grad():
            sample = model(**input )[0]
        assert list(sample.shape ) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1E-3 )
| 258 |
import numpy as np
SQUARE = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class _lowerCAmelCase :
def __init__( self ) -> None:
SCREAMING_SNAKE_CASE : str =np.array(snake_case_ )
def __a ( self , snake_case_ ) -> np.ndarray:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] =np.where(letter == self.SQUARE )
SCREAMING_SNAKE_CASE : Optional[Any] =np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __a ( self , snake_case_ , snake_case_ ) -> str:
SCREAMING_SNAKE_CASE : Optional[int] =self.SQUARE[indexa - 1, indexa - 1]
return letter
def __a ( self , snake_case_ ) -> str:
SCREAMING_SNAKE_CASE : Tuple =message.lower()
SCREAMING_SNAKE_CASE : Tuple =message.replace(''' ''' , '''''' )
SCREAMING_SNAKE_CASE : List[Any] =message.replace('''j''' , '''i''' )
SCREAMING_SNAKE_CASE : Optional[int] =np.empty((2, len(snake_case_ )) )
for letter_index in range(len(snake_case_ ) ):
SCREAMING_SNAKE_CASE : Any =self.letter_to_numbers(message[letter_index] )
SCREAMING_SNAKE_CASE : Dict =numbers[0]
SCREAMING_SNAKE_CASE : Optional[int] =numbers[1]
SCREAMING_SNAKE_CASE : Optional[int] =first_step.reshape(2 * len(snake_case_ ) )
SCREAMING_SNAKE_CASE : str =''''''
for numbers_index in range(len(snake_case_ ) ):
SCREAMING_SNAKE_CASE : Dict =int(second_step[numbers_index * 2] )
SCREAMING_SNAKE_CASE : str =int(second_step[(numbers_index * 2) + 1] )
SCREAMING_SNAKE_CASE : Union[str, Any] =self.numbers_to_letter(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE : List[Any] =encoded_message + letter
return encoded_message
def __a ( self , snake_case_ ) -> str:
SCREAMING_SNAKE_CASE : Tuple =message.lower()
message.replace(''' ''' , '''''' )
SCREAMING_SNAKE_CASE : List[Any] =np.empty(2 * len(snake_case_ ) )
for letter_index in range(len(snake_case_ ) ):
SCREAMING_SNAKE_CASE : Optional[int] =self.letter_to_numbers(message[letter_index] )
SCREAMING_SNAKE_CASE : str =numbers[0]
SCREAMING_SNAKE_CASE : int =numbers[1]
SCREAMING_SNAKE_CASE : Optional[int] =first_step.reshape((2, len(snake_case_ )) )
SCREAMING_SNAKE_CASE : Optional[Any] =''''''
for numbers_index in range(len(snake_case_ ) ):
SCREAMING_SNAKE_CASE : List[Any] =int(second_step[0, numbers_index] )
SCREAMING_SNAKE_CASE : Optional[int] =int(second_step[1, numbers_index] )
SCREAMING_SNAKE_CASE : int =self.numbers_to_letter(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE : int =decoded_message + letter
return decoded_message
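
# Round-trip usage sketch (illustrative; the class and method names above are
# reconstructed from the call structure, so treat them as assumptions):
if __name__ == "__main__":
    cipher = BifidCipher()
    assert cipher.decode(cipher.encode("testmessage")) == "testmessage"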
| 258 | 1 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 454 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
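
# Minimal usage sketch (illustrative only; the checkpoint id is a public VQA
# checkpoint and `image` stands for any PIL.Image):
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(images=image, text="How many cats are there?", return_tensors="pt")
# `encoding` then carries input_ids/attention_mask plus pixel_values and pixel_mask.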
| 454 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 91 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 434 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
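
# Illustrative only (toy values, not part of the evaluation script): with
# return_overflowing_tokens=True and a stride, one long example fans out into
# several overlapping features, and overflow_to_sample_mapping points each
# feature back at the example it came from, e.g.:
#   toy = tokenizer(["why?"], ["ctx " * 500], truncation="only_second", max_length=128,
#                   stride=32, return_overflowing_tokens=True, return_offsets_mapping=True)
#   len(toy["input_ids"]) > 1  and  toy["overflow_to_sample_mapping"] == [0, 0, ...]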
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(F" Num examples = {len(eval_dataset)}")
logger.info(F" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"Evaluation metrics: {eval_metric}")
| 574 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 574 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 74 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of non-adjacent elements of `nums`.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
if __name__ == "__main__":
import doctest
doctest.testmod()
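
# The tuple update above is the O(1)-space form of the classic recurrence
# (illustrative summary, not part of the original file):
#   include[i] = exclude[i - 1] + nums[i]
#   exclude[i] = max(include[i - 1], exclude[i - 1])
# e.g. for [1, 5, 3, 7, 2, 2, 6] the best non-adjacent picks are 5 + 7 + 6 = 18.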
| 615 | 0 |
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with `accuracy` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    # reduce theta modulo 2*pi so the truncated series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with `accuracy` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    # reduce theta modulo 2*pi so the truncated series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
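
# Sanity check (illustrative, not in the original file): after the 2*pi range
# reduction above, a 30-term Maclaurin series is accurate to far better than 1e-10.
if __name__ == "__main__":
    from math import cos, sin

    assert abs(maclaurin_sin(10) - sin(10)) < 1e-10
    assert abs(maclaurin_cos(5) - cos(5)) < 1e-10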
| 721 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
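
# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./model.ckpt --config_file ./config.json \
#     --pytorch_dump_path ./pytorch_model.bin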
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 455 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load the model configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
# Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
# Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
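
# The entity vocab is JSON-lines; each entry looks roughly like the following
# (illustrative values):
#   {"id": 3, "entities": [["Tokyo", "en"], ["東京", "ja"]]}
# which load_original_entity_vocab turns into {"en:Tokyo": 3, "ja:東京": 3},
# while the special tokens [MASK]/[PAD]/[UNK] are mapped without a language prefix.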
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 14 |
"""simple docstring"""
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
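
# Quick self-check of the midpoint helper (illustrative, not in the original
# file); note that each recursion level triples the number of triangles drawn,
# so a depth-d call shades 3**d small triangles.
assert get_mid((0, 0), (10, 4)) == (5.0, 2.0)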
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 567 | 0 |
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield successive Fibonacci numbers starting from 1."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
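
# Worked example (illustrative): the first Fibonacci term with 3 digits is 144,
# the 12th term of the sequence, so solution(3) == 12; Project Euler problem 25
# asks for solution(1000).
if __name__ == "__main__":
    assert solution(3) == 12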
| 704 |
"""simple docstring"""
import datasets
SCREAMING_SNAKE_CASE_ = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
SCREAMING_SNAKE_CASE_ = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
SCREAMING_SNAKE_CASE_ = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 579 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
    main()
| 327 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def UpperCAmelCase_ (_lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
__UpperCamelCase : Tuple = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
__UpperCamelCase : Union[str, Any] = (num_heads, hidden_size, num_splits) + input_shape[1:]
__UpperCamelCase : int = param.view(*_lowerCAmelCase )
__UpperCamelCase : int = param.transpose(0 , 2 )
__UpperCamelCase : Dict = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
__UpperCamelCase : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
__UpperCamelCase : Optional[Any] = param.view(*_lowerCAmelCase )
__UpperCamelCase : Optional[int] = param.transpose(0 , 1 ).contiguous()
__UpperCamelCase : Optional[Any] = param.view(*_lowerCAmelCase )
return param
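# --- Editor's sketch (hedged, not from the original script) ---
# Shape check for the checkpoint-version >= 2.0 branch above: with 2 heads of
# size 4 and 3 splits (Q, K, V), a [3 * 2 * 4, 16] weight keeps its shape while
# its rows move from (heads, splits, hidden) to (splits, heads, hidden) order.
# All sizes here are made up for the demo.
def _demo_qkv_reorder():
    num_heads, hidden_size, num_splits, in_dim = 2, 4, 3, 16
    param = torch.arange(num_heads * num_splits * hidden_size * in_dim, dtype=torch.float32).view(-1, in_dim)
    reshaped = param.view(num_heads, num_splits, hidden_size, in_dim)
    reordered = reshaped.transpose(0, 1).contiguous().view(*param.size())
    assert reordered.shape == param.shape  # layout changes, shape does not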
def UpperCAmelCase_ (_lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ):
# The converted output model.
__UpperCamelCase : str = {}
# old versions did not store training args
__UpperCamelCase : Union[str, Any] = input_state_dict.get("args" , _lowerCAmelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
__UpperCamelCase : Any = ds_args.padded_vocab_size
__UpperCamelCase : Dict = ds_args.max_position_embeddings
__UpperCamelCase : Any = ds_args.hidden_size
__UpperCamelCase : int = ds_args.num_layers
__UpperCamelCase : str = ds_args.num_attention_heads
__UpperCamelCase : Any = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
__UpperCamelCase : int = config.n_head
# The hidden_size per head.
__UpperCamelCase : Union[str, Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
__UpperCamelCase : Optional[Any] = input_state_dict["checkpoint_version"]
else:
__UpperCamelCase : Union[str, Any] = 0.0
# The model.
__UpperCamelCase : int = input_state_dict["model"]
# The language model.
__UpperCamelCase : List[Any] = model["language_model"]
# The embeddings.
__UpperCamelCase : Union[str, Any] = lm["embedding"]
# The word embeddings.
__UpperCamelCase : Union[str, Any] = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
__UpperCamelCase : List[Any] = word_embeddings[: config.vocab_size, :]
__UpperCamelCase : List[Any] = word_embeddings
# The position embeddings.
__UpperCamelCase : Optional[int] = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
__UpperCamelCase : str = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
__UpperCamelCase : Union[str, Any] = pos_embeddings
# The transformer.
__UpperCamelCase : Optional[int] = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
__UpperCamelCase : str = re.compile(R"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
__UpperCamelCase : Dict = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
__UpperCamelCase : Optional[int] = layer_re.match(_lowerCAmelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
__UpperCamelCase : str = int(m.group(1 ) )
# The name of the operation.
__UpperCamelCase : Dict = m.group(2 )
# Is it a weight or a bias?
__UpperCamelCase : Optional[int] = m.group(3 )
# The name of the layer.
__UpperCamelCase : Any = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
__UpperCamelCase : Optional[Any] = "ln_1" if op_name.startswith("input" ) else "ln_2"
__UpperCamelCase : int = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
            __UpperCamelCase : int = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.float16 ) ).view(
1 , 1 , _lowerCAmelCase , _lowerCAmelCase )
__UpperCamelCase : Optional[int] = causal_mask
# Insert a "dummy" tensor for masked_bias.
            __UpperCamelCase : int = torch.tensor(-1E4 , dtype=torch.float16 )
__UpperCamelCase : List[str] = masked_bias
__UpperCamelCase : str = fix_query_key_value_ordering(_lowerCAmelCase , _lowerCAmelCase , 3 , _lowerCAmelCase , _lowerCAmelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
__UpperCamelCase : Any = out_val.transpose(0 , 1 ).contiguous()
# Store.
__UpperCamelCase : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
__UpperCamelCase : Any = fix_query_key_value_ordering(_lowerCAmelCase , _lowerCAmelCase , 3 , _lowerCAmelCase , _lowerCAmelCase )
# Store. No change of shape.
__UpperCamelCase : int = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
__UpperCamelCase : str = megatron_to_transformers[op_name]
__UpperCamelCase : Tuple = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
__UpperCamelCase : List[str] = megatron_to_transformers[op_name]
__UpperCamelCase : Any = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
__UpperCamelCase : List[str] = transformer["final_layernorm.weight"]
__UpperCamelCase : Union[str, Any] = transformer["final_layernorm.bias"]
    # For the LM head, transformers wants the matrix tied to the word embeddings.
__UpperCamelCase : List[Any] = word_embeddings
# It should be done!
return output_state_dict
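# --- Editor's sketch (hedged, not from the original script) ---
# The returned dict is meant to load into a GPT-2 LM head model built from the
# same config. Tiny self-contained round trip showing the load pattern (the
# small config values are made up for the demo):
def _demo_load_round_trip():
    from transformers import GPT2Config, GPT2LMHeadModel

    cfg = GPT2Config(vocab_size=128, n_positions=32, n_embd=16, n_layer=2, n_head=2)
    model = GPT2LMHeadModel(cfg)
    # A state dict produced for a matching config should load strictly.
    model.load_state_dict(model.state_dict(), strict=True)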
def UpperCAmelCase_ ():
# Create the argument parser.
__UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=_lowerCAmelCase , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=_lowerCAmelCase , help="An optional config json file describing the pre-trained model." , )
__UpperCamelCase : int = parser.parse_args()
# Extract the basename.
__UpperCamelCase : Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
__UpperCamelCase : Union[str, Any] = torch.load(_lowerCAmelCase , map_location="cpu" )
else:
__UpperCamelCase : str = torch.load(args.path_to_checkpoint , map_location="cpu" )
__UpperCamelCase : Any = input_state_dict.get("args" , _lowerCAmelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
__UpperCamelCase : List[Any] = "gelu_fast"
elif ds_args.openai_gelu:
__UpperCamelCase : Union[str, Any] = "gelu_new"
else:
__UpperCamelCase : Optional[int] = "gelu"
else:
# in the very early days this used to be "gelu_new"
__UpperCamelCase : List[str] = "gelu_new"
# Spell out all parameters in case the defaults change.
        __UpperCamelCase : Dict = GPT2Config(
vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=_lowerCAmelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=_lowerCAmelCase , summary_activation=_lowerCAmelCase , summary_proj_to_labels=_lowerCAmelCase , summary_first_dropout=0.1 , scale_attn_weights=_lowerCAmelCase , use_cache=_lowerCAmelCase , bos_token_id=5_02_56 , eos_token_id=5_02_56 , )
else:
        __UpperCamelCase : Tuple = GPT2Config.from_json_file(args.config_file )
__UpperCamelCase : str = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
__UpperCamelCase : Tuple = convert_megatron_checkpoint(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_lowerCAmelCase , _lowerCAmelCase )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
__UpperCamelCase : Dict = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
__UpperCamelCase : int = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
__UpperCamelCase : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
__UpperCamelCase : Dict = "gpt2"
__UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
__UpperCamelCase : Union[str, Any] = type(_lowerCAmelCase ).__name__
__UpperCamelCase : Tuple = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(_lowerCAmelCase )
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(_lowerCAmelCase )
# Store the state_dict to file.
__UpperCamelCase : Optional[int] = os.path.join(_lowerCAmelCase , "pytorch_model.bin" )
print(F'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(_lowerCAmelCase , _lowerCAmelCase )
####################################################################################################
if __name__ == "__main__":
main()
#################################################################################################### | 327 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowerCAmelCase__ = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = True , ):
_lowercase =[file for file in os.listdir(A_ ) if os.path.isfile(os.path.join(A_ , A_ ) )]
if identifier is not None:
_lowercase =[file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(A_ , A_ ):
for n_ in n_identifier:
_lowercase =[file for file in files if n_ not in file]
else:
_lowercase =[file for file in files if n_identifier not in file]
_lowercase =ignore_files or []
ignore_files.append("__init__.py" )
_lowercase =[file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , A_ )
if only_modules:
_lowercase =file.split("." )[0]
try:
_lowercase =getattr(A_ , A_ )
_lowercase =doctest.DocTestSuite(A_ )
_lowercase =unittest.TextTestRunner().run(A_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'''{module_identifier} is not a module.''' )
else:
_lowercase =doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def __lowerCAmelCase ( self ):
_lowercase =Path("src/transformers" )
_lowercase ="modeling"
_lowercase =[
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(A_ , identifier=A_ , ignore_files=A_ )
def __lowerCAmelCase ( self ):
_lowercase =Path("src/transformers" )
_lowercase ="tokenization"
self.analyze_directory(A_ , identifier=A_ )
def __lowerCAmelCase ( self ):
_lowercase =Path("src/transformers" )
_lowercase ="configuration"
self.analyze_directory(A_ , identifier=A_ )
def __lowerCAmelCase ( self ):
_lowercase =Path("src/transformers" )
_lowercase =["configuration", "modeling", "tokenization"]
self.analyze_directory(A_ , n_identifier=A_ )
def __lowerCAmelCase ( self ):
_lowercase =Path("docs/source" )
_lowercase =["favicon.ico"]
self.analyze_directory(A_ , ignore_files=A_ , only_modules=A_ )
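# --- Editor's sketch (hedged, not from the original tests) ---
# The loop above turns a module's docstring examples into unit tests. The same
# pattern, self-contained, run against a stdlib module that ships doctests:
def _demo_doctest_suite():
    import difflib

    suite = doctest.DocTestSuite(difflib)
    result = unittest.TextTestRunner(verbosity=0).run(suite)
    print("doctest failures:", len(result.failures))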
| 716 | import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _a :
"""simple docstring"""
__SCREAMING_SNAKE_CASE = None
def __lowerCAmelCase ( self ):
_lowercase =self.feature_extraction_class(**self.feat_extract_dict )
_lowercase =json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =os.path.join(lowerCAmelCase_ , "feat_extract.json" )
feat_extract_first.to_json_file(lowerCAmelCase_ )
_lowercase =self.feature_extraction_class.from_json_file(lowerCAmelCase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __lowerCAmelCase ( self ):
_lowercase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =feat_extract_first.save_pretrained(lowerCAmelCase_ )[0]
check_json_file_has_correct_format(lowerCAmelCase_ )
_lowercase =self.feature_extraction_class.from_pretrained(lowerCAmelCase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __lowerCAmelCase ( self ):
_lowercase =self.feature_extraction_class()
self.assertIsNotNone(lowerCAmelCase_ )
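# --- Editor's sketch (hedged, not from the original mixin) ---
# Concrete version of the JSON round trip the mixin checks, using a real
# feature extractor class as a stand-in (the mixin itself leaves the class
# attribute unset):
def _demo_json_round_trip():
    from transformers import Wav2Vec2FeatureExtractor

    fe = Wav2Vec2FeatureExtractor()
    with tempfile.TemporaryDirectory() as tmpdirname:
        path = os.path.join(tmpdirname, "feat_extract.json")
        fe.to_json_file(path)
        fe_reloaded = Wav2Vec2FeatureExtractor.from_json_file(path)
    assert fe.to_dict() == fe_reloaded.to_dict()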
| 594 | 0 |
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
_UpperCAmelCase : List[Any] = '''.'''
if __name__ == "__main__":
_UpperCAmelCase : int = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
_UpperCAmelCase : str = []
_UpperCAmelCase : Optional[Any] = []
with open(doctest_file_path) as fp:
for line in fp:
_UpperCAmelCase : str = line.strip()
_UpperCAmelCase : Tuple = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
_UpperCAmelCase : Tuple = '''\n'''.join(non_existent_paths)
raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
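# --- Editor's sketch (hedged helper, not part of the check script) ---
# If the alphabetical-order check above fails, rewriting the list sorted is the
# usual remedy; `path` is assumed to point at the same doctest list file.
def sort_doctest_list(path):
    with open(path) as fp:
        lines = sorted(line for line in fp if line.strip())
    with open(path, "w") as fp:
        fp.writelines(lines)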
| 72 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
_SCREAMING_SNAKE_CASE : List[str] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
_SCREAMING_SNAKE_CASE : str = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : str = VOCAB_FILES_NAMES
lowerCAmelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : List[str] = ["input_ids", "attention_mask"]
lowerCAmelCase_ : Dict = BartTokenizer
def __init__( self , a__=None , a__=None , a__=None , a__="replace" , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__=False , a__=True , **a__ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
a__ , a__ , tokenizer_file=a__ , errors=a__ , bos_token=a__ , eos_token=a__ , sep_token=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , add_prefix_space=a__ , trim_offsets=a__ , **a__ , )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a__ ) != add_prefix_space:
snake_case_ = getattr(a__ , pre_tok_state.pop("type" ) )
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**a__ )
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = "post_processor"
snake_case_ = getattr(self.backend_tokenizer , a__ , a__ )
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state["sep"] )
if "cls" in state:
snake_case_ = tuple(state["cls"] )
snake_case_ = False
if state.get("add_prefix_space" , a__ ) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get("trim_offsets" , a__ ) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(a__ , state.pop("type" ) )
snake_case_ = component_class(**a__ )
setattr(self.backend_tokenizer , a__ , a__ )
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase__ ( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else value
snake_case_ = value
def lowerCAmelCase__ ( self , *a__ , **a__ ) -> BatchEncoding:
'''simple docstring'''
snake_case_ = kwargs.get("is_split_into_words" , a__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*a__ , **a__ )
def lowerCAmelCase__ ( self , *a__ , **a__ ) -> BatchEncoding:
'''simple docstring'''
snake_case_ = kwargs.get("is_split_into_words" , a__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*a__ , **a__ )
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
def lowerCAmelCase__ ( self , a__ , a__=None ) -> int:
'''simple docstring'''
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> List[int]:
'''simple docstring'''
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
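# --- Editor's sketch (hedged usage note, not from the original module) ---
# Expected special-token layout from build_inputs_with_special_tokens above,
# assuming the class keeps its usual public name BartTokenizerFast (this dump
# renames it); fetching the public checkpoint needs network access:
#
#   tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   tok.build_inputs_with_special_tokens([100, 200])
#   # -> [0, 100, 200, 2], i.e. <s> ... </s> wrapped around the sequence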
| 400 | 0 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
A : Optional[int] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
A : Dict = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def snake_case__ ( self : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] ):
__snake_case : Tuple = AudioClassificationPipeline(model=_lowerCAmelCase , feature_extractor=_lowerCAmelCase )
# test with a raw waveform
__snake_case : Optional[int] = np.zeros((3_40_00,) )
__snake_case : int = np.zeros((1_40_00,) )
return audio_classifier, [audioa, audio]
def snake_case__ ( self : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ):
__snake_case : Dict = examples
__snake_case : List[str] = audio_classifier(_lowerCAmelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
_lowerCAmelCase , [
{"""score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase )},
{"""score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase )},
] , )
__snake_case : List[Any] = audio_classifier(_lowerCAmelCase , top_k=1 )
self.assertEqual(
_lowerCAmelCase , [
{"""score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase )},
] , )
self.run_torchaudio(_lowerCAmelCase )
@require_torchaudio
def snake_case__ ( self : Any , _lowerCAmelCase : Dict ):
import datasets
# test with a local file
__snake_case : str = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
__snake_case : int = dataset[0]["""audio"""]["""array"""]
__snake_case : Union[str, Any] = audio_classifier(_lowerCAmelCase )
self.assertEqual(
_lowerCAmelCase , [
{"""score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase )},
{"""score""": ANY(_lowerCAmelCase ), """label""": ANY(_lowerCAmelCase )},
] , )
@require_torch
def snake_case__ ( self : int ):
__snake_case : Optional[int] = """anton-l/wav2vec2-random-tiny-classifier"""
__snake_case : str = pipeline("""audio-classification""" , model=_lowerCAmelCase )
__snake_case : Any = np.ones((80_00,) )
__snake_case : List[str] = audio_classifier(_lowerCAmelCase , top_k=4 )
__snake_case : List[Any] = [
{"""score""": 0.0842, """label""": """no"""},
{"""score""": 0.0838, """label""": """up"""},
{"""score""": 0.0837, """label""": """go"""},
{"""score""": 0.0834, """label""": """right"""},
]
__snake_case : Any = [
{"""score""": 0.0845, """label""": """stop"""},
{"""score""": 0.0844, """label""": """on"""},
{"""score""": 0.0841, """label""": """right"""},
{"""score""": 0.0834, """label""": """left"""},
]
self.assertIn(nested_simplify(_lowerCAmelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
__snake_case : List[str] = {"""array""": np.ones((80_00,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
__snake_case : List[Any] = audio_classifier(_lowerCAmelCase , top_k=4 )
self.assertIn(nested_simplify(_lowerCAmelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def snake_case__ ( self : int ):
import datasets
__snake_case : Dict = """superb/wav2vec2-base-superb-ks"""
__snake_case : Optional[Any] = pipeline("""audio-classification""" , model=_lowerCAmelCase )
__snake_case : Union[str, Any] = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
        __snake_case : Tuple = np.array(dataset[3]["""speech"""] , dtype=np.float32 )
__snake_case : List[Any] = audio_classifier(_lowerCAmelCase , top_k=4 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=3 ) , [
{"""score""": 0.981, """label""": """go"""},
{"""score""": 0.007, """label""": """up"""},
{"""score""": 0.006, """label""": """_unknown_"""},
{"""score""": 0.001, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def snake_case__ ( self : Any ):
pass
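# --- Editor's sketch (hedged usage note, not from the original tests) ---
# The suite above exercises the public pipeline entry point; a minimal direct
# call looks like this (needs network access to fetch the tiny checkpoint):
#
#   clf = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
#   clf(np.zeros(8_000, dtype=np.float32), top_k=2)
#   # -> [{"score": ..., "label": ...}, {"score": ..., "label": ...}]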
| 705 | import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
__snake_case : Union[str, Any] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE )
# set absolute/relative position embeddings parameter
__snake_case : Tuple = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__snake_case : Dict = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
elif task == "WTQ":
# run_task_main.py hparams
__snake_case : int = 4
__snake_case : Dict = True
# hparam_utils.py hparams
__snake_case : Any = 0.66_46_94
__snake_case : Optional[int] = 0.20_79_51
__snake_case : str = 0.12_11_94
__snake_case : Optional[int] = True
__snake_case : int = True
__snake_case : int = False
__snake_case : List[Any] = 0.0_35_25_13
__snake_case : Any = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__snake_case : Any = 4
__snake_case : Union[str, Any] = False
# hparam_utils.py hparams
__snake_case : Any = 36.45_19
__snake_case : Union[str, Any] = 0.90_34_21
__snake_case : Any = 2_22.0_88
__snake_case : Tuple = True
__snake_case : List[str] = True
__snake_case : Any = True
__snake_case : str = 0.76_31_41
__snake_case : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
elif task == "TABFACT":
__snake_case : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE )
elif task == "MLM":
__snake_case : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
__snake_case : Optional[int] = TapasModel(config=__SCREAMING_SNAKE_CASE )
else:
raise ValueError(F'''Task {task} not supported.''' )
print(F'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model (weights and configuration)
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
# Save tokenizer files
print(F'''Save tokenizer files to {pytorch_dump_path}''' )
__snake_case : Tuple = TapasTokenizer(vocab_file=tf_checkpoint_path[:-1_0] + """vocab.txt""" , model_max_length=5_1_2 )
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
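# --- Editor's sketch (hedged usage note, not from the original script) ---
# Typical invocation shape; the script filename is assumed from the usual
# transformers convention and every path is a placeholder:
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output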
| 390 | 0 |
def apply_table(inp, table):
    """Pick characters of ``inp`` by the 1-based positions in ``table``."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift of a bit string by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit S-box entry: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One S-DES round: expand, mix with the key, substitute, permute, XOR.

    ``p4_table`` is defined at module scope in the __main__ block below,
    mirroring the original script's use of a global.
    """
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
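# --- Editor's sketch (hedged sanity checks, added for illustration) ---
# Two quick checks of the helpers feeding the round function above:
assert apply_table("abcd", [2, 4, 3, 1]) == "bdca"  # P4-shaped permutation
assert left_shift("10100") == "01001"  # one circular left shift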
if __name__ == "__main__":
lowerCAmelCase__ : Optional[Any] =input('Enter 10 bit key: ')
lowerCAmelCase__ : int =input('Enter 8 bit message: ')
lowerCAmelCase__ : Any =[6, 3, 7, 4, 8, 5, 10, 9]
lowerCAmelCase__ : List[str] =[3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowerCAmelCase__ : Dict =[2, 4, 3, 1]
lowerCAmelCase__ : Dict =[2, 6, 3, 1, 4, 8, 5, 7]
lowerCAmelCase__ : Union[str, Any] =[4, 1, 3, 5, 7, 2, 8, 6]
lowerCAmelCase__ : Dict =[4, 1, 2, 3, 2, 3, 4, 1]
lowerCAmelCase__ : str =[[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowerCAmelCase__ : Any =[[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowerCAmelCase__ : Dict =apply_table(key, paa_table)
lowerCAmelCase__ : List[str] =temp[:5]
lowerCAmelCase__ : List[Any] =temp[5:]
lowerCAmelCase__ : Tuple =left_shift(left)
lowerCAmelCase__ : Optional[Any] =left_shift(right)
lowerCAmelCase__ : Optional[int] =apply_table(left + right, pa_table)
lowerCAmelCase__ : Optional[Any] =left_shift(left)
lowerCAmelCase__ : Optional[Any] =left_shift(right)
lowerCAmelCase__ : List[str] =left_shift(left)
lowerCAmelCase__ : Optional[int] =left_shift(right)
lowerCAmelCase__ : Tuple =apply_table(left + right, pa_table)
# encryption
lowerCAmelCase__ : Tuple =apply_table(message, IP)
lowerCAmelCase__ : List[Any] =function(expansion, sa, sa, keya, temp)
lowerCAmelCase__ : str =temp[4:] + temp[:4]
lowerCAmelCase__ : Optional[Any] =function(expansion, sa, sa, keya, temp)
lowerCAmelCase__ : Optional[int] =apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
lowerCAmelCase__ : Union[str, Any] =apply_table(CT, IP)
lowerCAmelCase__ : Tuple =function(expansion, sa, sa, keya, temp)
lowerCAmelCase__ : Union[str, Any] =temp[4:] + temp[:4]
lowerCAmelCase__ : Dict =function(expansion, sa, sa, keya, temp)
lowerCAmelCase__ : int =apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
| 101 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=2_2_4 , lowerCAmelCase__=1_0_0_0 , lowerCAmelCase__=[3, 3, 6, 4] , lowerCAmelCase__=[4_8, 5_6, 1_1_2, 2_2_0] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE_ : List[Any] = is_training
SCREAMING_SNAKE_CASE_ : str = use_labels
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : int = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = image_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_depths
SCREAMING_SNAKE_CASE_ : List[Any] = embed_dims
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ : Dict = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase__ , layer_scale_init_value=1E-5 , )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = SwiftFormerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE_ : str = SwiftFormerForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_ : Tuple = SwiftFormerForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Optional[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
_UpperCAmelCase = (
{"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_ : str = ConfigTester(
self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : str = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : int = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Optional[int] = SwiftFormerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@unittest.skip(reason='SwiftFormer does not output attentions' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Tuple = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = outputs.hidden_states
SCREAMING_SNAKE_CASE_ : Any = 8
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ : Dict = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
def _config_zero_init(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : List[str] = copy.deepcopy(lowerCAmelCase__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase__ , lowerCAmelCase__ , 1E-10 )
if isinstance(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : int = _config_zero_init(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[Any] = _config_zero_init(lowerCAmelCase__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : str = model_class(config=lowerCAmelCase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def a__ ( ):
SCREAMING_SNAKE_CASE_ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Any = prepare_img()
SCREAMING_SNAKE_CASE_ : List[str] = image_processor(images=lowerCAmelCase__ , return_tensors='pt' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[str] = model(**lowerCAmelCase__ )
# verify the logits
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 101 | 1 |
from collections import deque
class __UpperCamelCase :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
a__ = process_name # process name
a__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
a__ = arrival_time
a__ = burst_time # remaining burst time
a__ = 0 # total time of the process wait in ready queue
a__ = 0 # time from arrival time to completion time
class __UpperCamelCase :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> None:
# total number of mlfq's queues
a__ = number_of_queues
# time slice of queues that round robin algorithm applied
a__ = time_slices
# unfinished process is in this ready_queue
a__ = queue
# current time
a__ = current_time
# finished process is in this sequence queue
a__ = deque()
def _UpperCAmelCase ( self ) -> list[str]:
a__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[int]:
a__ = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[int]:
a__ = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[int]:
a__ = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[int]:
return [q.burst_time for q in queue]
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int:
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> deque[Process]:
a__ = deque() # sequence deque of finished process
while len(SCREAMING_SNAKE_CASE ) != 0:
a__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(SCREAMING_SNAKE_CASE )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
a__ = 0
# set the process's turnaround time because it is finished
a__ = self.current_time - cp.arrival_time
# set the completion time
a__ = self.current_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE )
self.finish_queue.extend(SCREAMING_SNAKE_CASE ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple[deque[Process], deque[Process]]:
a__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(SCREAMING_SNAKE_CASE ) ):
a__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(SCREAMING_SNAKE_CASE )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
a__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(SCREAMING_SNAKE_CASE )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
a__ = 0
# set the finish time
a__ = self.current_time
# update the process' turnaround time because it is finished
a__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE )
self.finish_queue.extend(SCREAMING_SNAKE_CASE ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def _UpperCAmelCase ( self ) -> deque[Process]:
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
a__ , a__ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
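# --- Editor's sketch (hedged worked example, not from the original) ---
# Hand trace for two processes through the scheduler above, written with the
# intended public names Process and MLFQ that the demo below already uses:
#
#   p1, p2 = Process("P1", 0, 53), Process("P2", 0, 17)
#   mlfq = MLFQ(3, [17, 25], deque([p1, p2]), 0)
#   mlfq.multi_level_feedback_queue()
#   # queue 0 (slice 17): P1 runs 17 (36 left), P2 finishes at t=34
#   # queue 1 (slice 25): P1 runs 25 (11 left), t=59; FCFS finishes P1 at t=70
#   # mlfq.calculate_sequence_of_finish_queue() -> ["P2", "P1"]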
if __name__ == "__main__":
import doctest
a_ : Tuple = Process('P1', 0, 53)
a_ : str = Process('P2', 0, 17)
a_ : Optional[int] = Process('P3', 0, 68)
a_ : Optional[Any] = Process('P4', 0, 24)
a_ : Tuple = 3
a_ : Tuple = [17, 25]
a_ : List[str] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
a_ : Any = Process('P1', 0, 53)
a_ : Union[str, Any] = Process('P2', 0, 17)
a_ : Optional[int] = Process('P3', 0, 68)
a_ : List[Any] = Process('P4', 0, 24)
a_ : str = 3
a_ : Optional[Any] = [17, 25]
a_ : List[str] = deque([Pa, Pa, Pa, Pa])
a_ : int = MLFQ(number_of_queues, time_slices, queue, 0)
a_ : Optional[int] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
f'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 148 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ : Any = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[Any] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 148 | 1 |
def fizz_buzz(number, iterations):
    """Play FizzBuzz from ``number`` up to and including ``iterations``."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
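# --- Editor's note (hedged worked example, added for illustration) ---
# Each emitted token is followed by a single space:
assert fizz_buzz(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "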
| 167 | from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowercase = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 167 | 1 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def __snake_case ( SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
"""simple docstring"""
if hor == 128:
UpperCAmelCase = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
UpperCAmelCase = (32, 128, 256)
UpperCAmelCase = ('''UpResnetBlock1D''', '''UpResnetBlock1D''')
elif hor == 32:
UpperCAmelCase = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
UpperCAmelCase = (32, 64, 128, 256)
UpperCAmelCase = ('''UpResnetBlock1D''', '''UpResnetBlock1D''', '''UpResnetBlock1D''')
UpperCAmelCase = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
UpperCAmelCase = model.state_dict()
UpperCAmelCase = {
'''down_block_types''': down_block_types,
'''block_out_channels''': block_out_channels,
'''up_block_types''': up_block_types,
'''layers_per_block''': 1,
'''use_timestep_embedding''': True,
'''out_block_type''': '''OutConv1DBlock''',
'''norm_num_groups''': 8,
'''downsample_each_block''': False,
'''in_channels''': 14,
'''out_channels''': 14,
'''extra_in_channels''': 0,
'''time_embedding_type''': '''positional''',
'''flip_sin_to_cos''': False,
'''freq_shift''': 1,
'''sample_size''': 65_536,
'''mid_block_type''': '''MidResTemporalBlock1D''',
'''act_fn''': '''mish''',
}
    UpperCAmelCase = UNet1DModel(**SCREAMING_SNAKE_CASE_ )
print(f"length of state dict: {len(state_dict.keys() )}" )
print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
UpperCAmelCase = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ )
hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE_ )
torch.save(hf_value_function.state_dict() , f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json" , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __snake_case ( ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = {
'''in_channels''': 14,
'''down_block_types''': ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D'''),
'''up_block_types''': (),
'''out_block_type''': '''ValueFunction''',
'''mid_block_type''': '''ValueFunctionMidBlock1D''',
'''block_out_channels''': (32, 64, 128, 256),
'''layers_per_block''': 1,
'''downsample_each_block''': True,
'''sample_size''': 65_536,
'''out_channels''': 14,
'''extra_in_channels''': 0,
'''time_embedding_type''': '''positional''',
'''use_timestep_embedding''': True,
'''flip_sin_to_cos''': False,
'''freq_shift''': 1,
'''norm_num_groups''': 8,
'''act_fn''': '''mish''',
}
UpperCAmelCase = torch.load('''/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch''' )
UpperCAmelCase = model
    UpperCAmelCase = UNet1DModel(**SCREAMING_SNAKE_CASE_ )
print(f"length of state dict: {len(state_dict.keys() )}" )
print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
UpperCAmelCase = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ )
hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE_ )
torch.save(hf_value_function.state_dict() , '''hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin''' )
with open('''hub/hopper-medium-v2/value_function/config.json''' , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
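# Illustrative sketch (added for clarity, not part of the original script): both
# converters rely on the source and target state dicts listing their parameters
# in the same order, so zipping the two key views yields the old-name -> new-name
# mapping. The helper name below is hypothetical.
def _remap_keys_by_order(src_state_dict: dict, dst_keys) -> dict:
    """Rename src_state_dict's keys to dst_keys, trusting matching key order."""
    return {new: src_state_dict[old] for old, new in zip(src_state_dict, dst_keys)}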
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 570 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
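        # For XLNet the special tokens go at the end of the sequence: in the
        # pretrained vocab id 4 is <sep> and id 3 is <cls>, hence tokens + [4, 3].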
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase = {'''input_ids''': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511")
| 570 | 1 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) through C(upper_limit) as a list."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
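# Worked check (illustrative, not from the original script): the first six
# Catalan numbers are 1, 1, 2, 5, 14, 42; e.g. C(3) = C0*C2 + C1*C1 + C2*C0 = 5.
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]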
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
            N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(f'The Catalan numbers from 0 through {N} are:')
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 593 |
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
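# Illustration (added sketch): a positive power of two has exactly one set bit,
# so clearing the lowest set bit with `n & (n - 1)` leaves 0. Note that 0 also
# satisfies the identity, a known quirk of this one-liner.
assert is_power_of_two(8) and not is_power_of_two(6)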
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 | 0 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Return all character n-grams of length ngram_size found in sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
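# Usage sketch (illustrative): character bigrams of a short string.
assert create_ngram("abcd", 2) == ["ab", "bc", "cd"]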
if __name__ == "__main__":
from doctest import testmod
testmod()
| 232 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # RVL-CDIP has 16 document classes, hence the (1, 16) logits.
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 232 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(self, vocab_size=32_000, hidden_size=768, num_hidden_layers=12, intermediate_size=3_072, hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 522 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(self, vocab_size=8_065, hidden_size=1_536, num_hidden_layers=36, intermediate_size=6_144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 522 | 1 |
def kth_permutation(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    # Factorials from 1! up to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
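# Worked example (illustrative): with n = 4 there are 4! = 24 permutations;
# k = 0 is the identity ordering and k = 23 is the fully reversed one.
assert kth_permutation(0, 4) == [0, 1, 2, 3]
assert kth_permutation(23, 4) == [3, 2, 1, 0]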
if __name__ == "__main__":
import doctest
doctest.testmod()
| 587 | # Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    """Adjacency-list representation of a graph, directed by default."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self ):
return pformat(self.adj_list )
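if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not from the original module).
    graph = GraphAdjacencyList(directed=False)
    graph.add_edge(1, 2).add_edge(2, 3)
    print(graph)  # {1: [2], 2: [1, 3], 3: [2]}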
| 587 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """Weighted, undirected graph: a vertex set plus an edge -> weight mapping."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> Graph:
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
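# The XOR test in prims_algorithm implements the cut property of minimum
# spanning trees: an edge is a candidate only while exactly one of its
# endpoints lies inside the growing subgraph.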
def solution(filename: str = "p107_network.txt") -> int:
    """Return the saving gained by replacing the network with its minimum spanning tree."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data: list[str] = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge_b in range(1, len(adjacency_matrix)):
        for edge_a in range(edge_b):
            if adjacency_matrix[edge_b][edge_a] != "-":
                edges[(edge_a, edge_b)] = int(adjacency_matrix[edge_b][edge_a])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 13 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class VisionTextDualEncoderProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 528 | 0 |
'''simple docstring'''
def is_balanced(s):
    """Check whether the bracket sequence in s is balanced."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
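# Quick check (illustrative): nested pairs balance; crossed pairs do not.
assert is_balanced("{[()]}") and not is_balanced("[(])")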
def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
| 310 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 310 | 1 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
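# Note: the scrape pairs headings with counters purely by document order, so it
# breaks silently if worldometers reorders its markup; checking that
# len(keys) == len(values) before zipping is a cheap guard.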
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 69 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 69 | 1 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self):
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 518 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict

    def setUp( self ):
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_mobilebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )

    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )

    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
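
# _long_tensor builds a LongTensor of token ids on the shared test device
# (`torch_device` from transformers.testing_utils); TOLERANCE bounds the relative
# error allowed by the slow integration test below.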
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )


TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase ):
@slow
    def test_inference_no_head( self ):
        model = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_73_65_26e07, 8.2_69_16_56e04, 1.6_52_18_38e05],
                    [-5.7_54_17_04e-01, 3.9_05_60_22e00, 4.4_01_15_07e00],
                    [2.6_04_73_59e00, 1.5_67_76_52e00, -1.7_32_41_88e-01],
                ]
            ] , device=torch_device , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
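        # e.g. with TOLERANCE = 1e-3, a ratio of 0.9995 passes while 1.002 would fail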
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound ) | 518 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase_ : List[Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig ):
    model_type = """markuplm"""
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1_024 , tag_pad_id=216 , subs_pad_id=1_001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
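        # The defaults above mirror BERT-base; the xpath tag/subscript embedding tables
        # are the MarkupLM-specific additions for encoding a node's position in the DOM tree.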
| 345 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__( self , components = None ):
        if components is None:
            components = []
        self.__components = list(components )

    def __len__( self ):
        return len(self.__components )

    def __str__( self ):
        return "(" + ",".join(map(str , self.__components ) ) + ")"
    def __add__( self , other ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception('must have the same size' )

    def __sub__( self , other ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else:  # error case
            raise Exception('must have the same size' )

    @overload
    def __mul__( self , other ):
        ...

    @overload
    def __mul__( self , other ):
        ...

    def __mul__( self , other ):
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            prods = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else:  # error case
            raise Exception('invalid operand!' )
def __lowerCamelCase ( self ):
return Vector(self.__components )
    def component( self , i ):
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception('index out of range' )

    def change_component( self , pos , value ):
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value

    def euclidean_length( self ):
        if len(self.__components ) == 0:
            raise Exception('Vector is empty' )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )

    def angle( self , other , deg = False ):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def zero_vector( dimension : int ) -> Vector:
    assert isinstance(dimension , int )
    return Vector([0] * dimension )


def unit_basis_vector( dimension : int , pos : int ) -> Vector:
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )


def axpy( scalar : float , x : Vector , y : Vector ) -> Vector:
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y


def random_vector( n : int , a : int , b : int ) -> Vector:
    random.seed(None )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
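
# Minimal usage sketch for the helpers above:
#   zero_vector(3)                               -> (0,0,0)
#   unit_basis_vector(3 , 0)                     -> (1,0,0)
#   axpy(2.0 , Vector([1, 0]) , Vector([0, 1]))  -> 2*x + y = (2,1)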
class Matrix:
    def __init__( self , matrix , w , h ):
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__( self ):
        ans = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__( self , other ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrix must have the same dimension!' )

    def __sub__( self , other ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrices must have the same dimension!' )

    @overload
    def __mul__( self , other ):
        ...

    @overload
    def __mul__( self , other ):
        ...

    def __mul__( self , other ):
        if isinstance(other , Vector ):  # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    summands = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(summands ) )
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!' )
        elif isinstance(other , (int, float) ):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None
    def height( self ):
        return self.__height

    def width( self ):
        return self.__width

    def component( self , x , y ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('change_component: indices out of bounds' )

    def change_component( self , x , y , value ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds' )

    def minor( self , x , y ):
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()

    def cofactor( self , x , y ):
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception('Indices out of bounds' )

    def determinant( self ):
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if self.__height < 1:
            raise Exception('Matrix has no element' )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
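
    # determinant() expands along the first row via cofactor() (Laplace expansion);
    # that is O(n!) but acceptable for the small matrices this module targets.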
def square_zero_matrix( n : int ) -> Matrix:
    ans = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )


def random_matrix( width : int , height : int , a : int , b : int ) -> Matrix:
    random.seed(None )
    matrix = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )
| 345 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase =logging.get_logger(__name__)
lowerCamelCase ={"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = '''ctrl'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
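    # attribute_map lets generic code read config.hidden_size / config.num_hidden_layers
    # while the stored CTRL fields keep their historical n_embd / n_layer names.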
    def __init__( self , vocab_size=2_4_6_5_3_4 , n_positions=2_5_6 , n_embd=1_2_8_0 , dff=8_1_9_2 , n_layer=4_8 , n_head=1_6 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
| 285 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 5_2_4_2_8_8,
}
class ReformerTokenizer(PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs = None , **kwargs , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        """simple docstring"""
        return self.sp_model.get_piece_size()

    def get_vocab( self ) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
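
    # __getstate__/__setstate__ below drop the C++ SentencePiece handle before pickling
    # and rebuild it on unpickling, since the processor object itself is not picklable.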
    def __getstate__( self ) -> Any:
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token( self , index ):
        """simple docstring"""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 285 | 1 |
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def capitalize( sentence ):
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase ) )
    return lower_to_upper.get(sentence[0], sentence[0] ) + sentence[1:]
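
# e.g. capitalize("hello world") -> "Hello world"; a leading non-letter is left unchanged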
if __name__ == "__main__":
from doctest import testmod
testmod()
| 406 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big( n, prec=1_0_0_0 ):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1 )
        b = bin_exp_mod(a, d, n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
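
# Each random base wrongly passes a composite with probability at most 1/4, so `prec`
# independent rounds drive the false-positive chance below 4**-prec.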
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 406 | 1 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 297 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig ):
    model_type = """gptj"""
    attribute_map = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=50_400 , n_positions=2_048 , n_embd=4_096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , tie_word_embeddings=False , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class GPTJOnnxConfig(OnnxConfigWithPast ):
    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
        '''simple docstring'''
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , 'pad_token_id' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers( self ) -> int:
        '''simple docstring'''
        return self._config.n_layer

    @property
    def num_attention_heads( self ) -> int:
        '''simple docstring'''
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs the way they appear in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch

                batch , seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 13
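
    # Usage sketch: constructing this ONNX config with use_past=True makes
    # generate_dummy_inputs emit past_key_values, so the export traces the KV-cache path.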
| 291 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin ):
    """simple docstring"""

    feature_extractor_class = """SpeechT5FeatureExtractor"""
    tokenizer_class = """SpeechT5Tokenizer"""
    def __init__( self , feature_extractor , tokenizer ):
        '''simple docstring'''
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        audio = kwargs.pop("""audio""" , None )
        text = kwargs.pop("""text""" , None )
        text_target = kwargs.pop("""text_target""" , None )
        audio_target = kwargs.pop("""audio_target""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )

        if audio is not None and text is not None:
            raise ValueError(
                """Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                """Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                """You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )

        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        elif text is not None:
            inputs = self.tokenizer(text , **kwargs )
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs )
            labels = targets["""input_values"""]
        elif text_target is not None:
            targets = self.tokenizer(text_target , **kwargs )
            labels = targets["""input_ids"""]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["""labels"""] = labels
            decoder_attention_mask = targets.get("""attention_mask""" )
            if decoder_attention_mask is not None:
                inputs["""decoder_attention_mask"""] = decoder_attention_mask

        return inputs
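
    # __call__ above routes encoder-side audio/text through the feature extractor or
    # tokenizer and folds decoder-side targets into `labels`/`decoder_attention_mask`.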
    def pad( self , *args , **kwargs ):
        '''simple docstring'''
        input_values = kwargs.pop("""input_values""" , None )
        input_ids = kwargs.pop("""input_ids""" , None )
        labels = kwargs.pop("""labels""" , None )

        if input_values is not None and input_ids is not None:
            raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                """You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs )
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs )
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list ) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs )
                labels = targets["""input_ids"""]
            else:
                # temporarily widen feature_size so the extractor pads full mel spectrograms
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs )
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["""input_values"""]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["""labels"""] = labels
            decoder_attention_mask = targets.get("""attention_mask""" )
            if decoder_attention_mask is not None:
                inputs["""decoder_attention_mask"""] = decoder_attention_mask

        return inputs
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
| 201 |
'''simple docstring'''
def join( separator , separated ) -> str:
    """simple docstring"""
    joined = """"""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception("""join() accepts only strings to be joined""" )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 201 | 1 |
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__( self , initial_capacity : int = 6 ) -> None:
        self.front = None
        self.rear = None
        self.create_linked_list(initial_capacity )

    def create_linked_list( self , initial_capacity : int ) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty( self ) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first( self ) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None
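
    # enqueue/dequeue below never allocate: the ring of nodes is pre-built and a slot
    # counts as empty while its data is None, so both operations run in O(1).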
    def enqueue( self , data : Any ) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue( self ) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation( self ) -> None:
        if self.is_empty():
            raise Exception("""Empty Queue""" )

    def check_is_full( self ) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""" )
class Node:
    def __init__( self ) -> None:
        self.data = None
        self.next = None
        self.prev = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__( self , parent , batch_size=12 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , hidden_size=32 , projection_dim=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=5_12 , initializer_range=0.0_2 , bos_token_id=0 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size , seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
    def get_config( self ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model( self , config , input_ids , input_mask ):
        model = TFBlipTextModel(config=config )
        result = model(input_ids , attention_mask=input_mask , training=False )
        result = model(input_ids , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
return config, inputs_dict
@require_tf
class TFBlipTextModelTest( TFModelTesterMixin , unittest.TestCase ):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
    def setUp( self ):
        self.model_tester = BlipTextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCamelCase ( self : Optional[Any])-> str:
pass
def UpperCamelCase ( self : List[str])-> int:
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""")
def UpperCamelCase ( self : Optional[Any])-> int:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""")
def UpperCamelCase ( self : List[Any])-> str:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""")
def UpperCamelCase ( self : Optional[int])-> Tuple:
pass
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_pt_tf_model_equivalence( self , allow_missing_keys=True ):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
| 354 | 1 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
lowerCamelCase__ = threading.Lock()
lowerCamelCase__ = None
lowerCamelCase__ = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
lowerCamelCase__ = logging.WARNING
lowerCamelCase__ = True
def __lowerCAmelCase ():
    env_level_str = os.getenv('TRANSFORMERS_VERBOSITY' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                F"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def __lowerCAmelCase ():
return __name__.split('.' )[0]
def __lowerCAmelCase ():
return logging.getLogger(_get_library_name() )
def __lowerCAmelCase ():
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def __lowerCAmelCase ():
global _default_handler
with _lock:
if not _default_handler:
return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def __lowerCAmelCase ():
return log_levels
def get_logger(name: Optional[str] = None ):
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name )
def __lowerCAmelCase ():
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int ):
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )


def set_verbosity_info():
    return set_verbosity(INFO )


def set_verbosity_warning():
    return set_verbosity(WARNING )


def set_verbosity_debug():
    return set_verbosity(DEBUG )


def set_verbosity_error():
    return set_verbosity(ERROR )
def __lowerCAmelCase ():
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def __lowerCAmelCase ():
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def add_handler(handler ):
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler )


def remove_handler(handler ):
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )
def __lowerCAmelCase ():
_configure_library_root_logger()
    _get_library_root_logger().propagate = False
def __lowerCAmelCase ():
_configure_library_root_logger()
    _get_library_root_logger().propagate = True
def __lowerCAmelCase ():
__lowerCAmelCase : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
        formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
        handler.setFormatter(formatter )
def __lowerCAmelCase ():
__lowerCAmelCase : Tuple = _get_library_root_logger().handlers
for handler in handlers:
        handler.setFormatter(None )
def __lowerCAmelCase (self , *_UpperCamelCase , **_UpperCamelCase ):
    no_advisory_warnings = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , False )
if no_advisory_warnings:
return
self.warning(*_UpperCamelCase , **_UpperCamelCase )
lowerCamelCase__ = warning_advice
@functools.lru_cache(None )
def __lowerCAmelCase (self , *_UpperCamelCase , **_UpperCamelCase ):
self.warning(*_UpperCamelCase , **_UpperCamelCase )
lowerCamelCase__ = warning_once
class EmptyTqdm:
    def __init__( self , *args , **kwargs ):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
def __iter__( self ):
return iter(self._iterator )
def __getattr__( self , _SCREAMING_SNAKE_CASE ):
def empty_fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
return self
def __exit__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return
class _tqdm_cls:
    def __call__( self , *args , **kwargs ):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
lowerCamelCase__ = _tqdm_cls()
def __lowerCAmelCase ():
global _tqdm_active
return bool(_tqdm_active )
def __lowerCAmelCase ():
global _tqdm_active
    _tqdm_active = True
hf_hub_utils.enable_progress_bars()
def __lowerCAmelCase ():
global _tqdm_active
    _tqdm_active = False
hf_hub_utils.disable_progress_bars() | 549 |
"""simple docstring"""
# Imports
import numpy as np
class IndexCalculation:
    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )

    def set_matricies( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation( self , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
        funcs = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
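
    # Usage sketch (bands are assumed to be equally shaped numpy arrays):
    #   cl = IndexCalculation(red=r , green=g , blue=b , nir=n)
    #   ndvi_map = cl.calculation("NDVI")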
def __lowerCamelCase ( self ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def __lowerCamelCase ( self ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def __lowerCamelCase ( self ):
return self.nir * (self.red / (self.green**2))
def __lowerCamelCase ( self ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
    def ndvi( self ):
        return (self.nir - self.red) / (self.nir + self.red)
def __lowerCamelCase ( self ):
return (self.nir - self.blue) / (self.nir + self.blue)
def __lowerCamelCase ( self ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def __lowerCamelCase ( self ):
return (self.nir - self.green) / (self.nir + self.green)
def __lowerCamelCase ( self ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def __lowerCamelCase ( self ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def __lowerCamelCase ( self ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def __lowerCamelCase ( self ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def atsavi( self , a=0.08 , b=1.22 , x=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def __lowerCamelCase ( self ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def __lowerCamelCase ( self ):
return (self.nir / self.green) - 1
def __lowerCamelCase ( self ):
return (self.nir / self.redEdge) - 1
def __lowerCamelCase ( self ):
return (self.red - self.blue) / self.red
    def ctvi( self ):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def __lowerCamelCase ( self ):
return self.nir - self.green
def __lowerCamelCase ( self ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
    def gemi( self ):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi( self , y=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi( self , n=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def __lowerCamelCase ( self ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi( self , a=None , b=None ):
return (self.nir - b) / (a * self.red)
def __lowerCamelCase ( self ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def __lowerCamelCase ( self ):
return (self.red + self.green + self.blue) / 30.5
def __lowerCamelCase ( self ):
return self.nir / self.red
def __lowerCamelCase ( self ):
return (self.rvi() - 1) / (self.rvi() + 1)
def __lowerCamelCase ( self ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def __lowerCamelCase ( self ):
return self.green / (self.nir + self.red + self.green)
def __lowerCamelCase ( self ):
return self.nir / (self.nir + self.red + self.green)
def __lowerCamelCase ( self ):
return self.red / (self.nir + self.red + self.green)
def __lowerCamelCase ( self ):
return (self.green - self.red) / (self.green + self.red)
def __lowerCamelCase ( self ):
return (self.red - self.green) / (self.red + self.green)
    def s( self ):
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def __lowerCamelCase ( self ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def __lowerCamelCase ( self ):
return self.nir / self.red
def __lowerCamelCase ( self ):
return (self.ndvi() + 0.5) ** (1 / 2)
def __lowerCamelCase ( self ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge) | 549 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
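
# Without a desktop browser User-Agent, Google tends to serve a stripped results page
# that lacks the AF_initDataCallback payloads the scraper below parses.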
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5 ) -> int:
    max_images = min(max_images , 50 )  # Prevent abuse!
    params = {
        """q""": query,
        """tbm""": """isch""",
        """hl""": """en""",
        """ijn""": """0""",
    }

    html = requests.get("https://www.google.com/search" , params=params , headers=headers )
    soup = BeautifulSoup(html.text , "html.parser" )
    matched_images_data = """""".join(
        re.findall(R"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )

    matched_images_data_fix = json.dumps(matched_images_data )
    matched_images_data_json = json.loads(matched_images_data_fix )

    matched_google_image_data = re.findall(
        R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , matched_images_data_json , )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(matched_google_image_data ) , )

    matched_google_full_resolution_images = re.findall(
        R"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , removed_matched_google_images_thumbnails , )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images ):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image , "ascii" ).decode(
            "unicode-escape" )
        original_size_img = bytes(original_size_img_not_fixed , "ascii" ).decode(
            "unicode-escape" )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                """User-Agent""",
                """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
                """ (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""",
            )
        ]
        urllib.request.install_opener(opener )
        path_name = f"""query_{query.replace(" " , "_" )}"""
        if not os.path.exists(path_name ):
            os.makedirs(path_name )
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img , f"""{path_name}/original_size_img_{index}.jpg""" )
    return index
if __name__ == "__main__":
try:
_UpperCamelCase = download_images_from_google_query(sys.argv[1])
print(F'''{image_count} images were downloaded to disk.''')
except IndexError:
print('''Please provide a search term.''')
raise
| 243 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput ):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNetaDConditionModel(nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1_280, 1_280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1_280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self , rng : jax.random.KeyArray ) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )

        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}

        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
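
    # init_weights traces a dummy forward pass through flax's Module.init to materialize
    # the parameter pytree; splitting the PRNG keeps dropout noise separate from weight init.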
def lowerCAmelCase (self : Tuple ):
__a : Tuple = self.block_out_channels
__a : List[str] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
# down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__a : Any = FlaxCrossAttnDownBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__a : int = FlaxDownBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case_ )
__a : Dict = down_blocks
# mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(block_out_channels ) - 1 )]
            is_final_block = i == len(block_out_channels ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__a : Optional[Any] = FlaxCrossAttnUpBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , prev_output_channel=snake_case_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__a : Dict = FlaxUpBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , prev_output_channel=snake_case_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(snake_case_ )
__a : str = output_channel
__a : Any = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        self.conv_out = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__(self , sample , timesteps , encoder_hidden_states , down_block_additional_residuals=None , mid_block_additional_residual=None , return_dict: bool = True , train: bool = False , ):
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples , down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block , FlaxCrossAttnUpBlockaD ):
                sample = up_block(
                    sample , temb=t_emb , encoder_hidden_states=encoder_hidden_states , res_hidden_states_tuple=res_samples , deterministic=not train , )
            else:
                sample = up_block(sample , temb=t_emb , res_hidden_states_tuple=res_samples , deterministic=not train )
        # 6. post-process
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample )
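

# --- Added usage sketch (not part of the original file) ---
# Exercises the model above end to end. It assumes the garbled sibling module
# `unet_ad_blocks_flax` resolves to the usual Flax UNet blocks; shapes follow the
# config defaults (cross_attention_dim=1_280).
if __name__ == "__main__":
    model = FlaxUNetaDConditionModel(sample_size=32 , in_channels=4 , out_channels=4 )
    params = model.init_weights(jax.random.PRNGKey(0 ) )
    sample = jnp.zeros((1, 4, 32, 32) , dtype=jnp.float32 )
    timesteps = jnp.array([10] , dtype=jnp.int32 )
    encoder_hidden_states = jnp.zeros((1, 77, 1_280) , dtype=jnp.float32 )
    out = model.apply({"params": params} , sample , timesteps , encoder_hidden_states )
    print(out.sample.shape )  # (1, 4, 32, 32)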
| 521 | 0 |
from __future__ import annotations


def prime_sieve(limit: int ) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes


def solution(ceiling: int = 100_0000 ) -> int:
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f'{solution() = }')
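

# --- Added sanity check (not part of the original solution) ---
# 41 = 2 + 3 + 5 + 7 + 11 + 13 is the longest sum of consecutive primes below 100
# that is itself prime, so solution(100) should return it.
assert prime_sieve(10) == [2, 3, 5, 7]
assert solution(100) == 41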
| 700 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.encodec""")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    """simple docstring"""
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def should_ignore(name , ignore_keys ):
    """simple docstring"""
    for key in ignore_keys:
        if key.endswith('.*' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split('.*.' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
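

# --- Added illustration (not part of the original script) ---
# How the wildcard rules above behave, with hypothetical pattern strings:
#
#   should_ignore("encoder.model.5.conv", ["encoder.model.5.*"])               # True: trailing ".*" is a prefix match
#   should_ignore("quantizer.vq.layers.3.bar", ["quantizer.vq.layers.*.bar"])  # True: ".*." needs prefix and suffix
#   should_ignore("decoder.model.0.conv", ["encoder.*"])                       # False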
def recursively_load_weights(orig_dict , hf_model , model_name ):
    """simple docstring"""
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F"Unsupported model: {model_name}" )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F"{name} was ignored" )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix , suffix = key.split('.*.' )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed' ) and name.endswith('embed_avg' ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split('.' )[-2]
                    mapped_key = mapped_key.replace('*' , layer_index )
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"Unused weights: {unused_weights}" )
@torch.no_grad()
def convert_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    """simple docstring"""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 3_2000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 4_8000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F"Unknown model name: {model_name}" )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case : Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
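

# --- Added usage note (not part of the original script) ---
# A typical invocation, assuming this file is saved as convert_encodec_checkpoint_to_pytorch.py
# and the 24 kHz checkpoint from the URLs at the top of the file has been downloaded locally:
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz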
| 365 | 0 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module ):
    """simple docstring"""
    def __init__( self , dim , num_attention_heads , attention_head_dim , dropout=0.0 , cross_attention_dim = None , activation_fn = "geglu" , num_embeds_ada_norm = None , attention_bias = False , only_cross_attention = False , double_self_attention = False , upcast_attention = False , norm_elementwise_affine = True , norm_type = "layer_norm" , final_dropout = False , ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                F"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                F" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim , num_embeds_ada_norm )
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim , num_embeds_ada_norm )
        else:
            self.norm1 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.attn1 = Attention(
            query_dim=dim , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=upcast_attention , )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim , num_embeds_ada_norm )
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
            )
            self.attn2 = Attention(
                query_dim=dim , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , upcast_attention=upcast_attention , )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.ff = FeedForward(dim , dropout=dropout , activation_fn=activation_fn , final_dropout=final_dropout )
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward(self , chunk_size , dim ):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward(self , hidden_states , attention_mask = None , encoder_hidden_states = None , encoder_attention_mask = None , timestep = None , cross_attention_kwargs = None , class_labels = None , ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states , timestep )
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states , gate_msa , shift_mlp , scale_mlp , gate_mlp = self.norm1(
                hidden_states , timestep , class_labels , hidden_dtype=hidden_states.dtype )
        else:
            norm_hidden_states = self.norm1(hidden_states )
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=attention_mask , **cross_attention_kwargs , )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1 ) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states , timestep ) if self.use_ada_layer_norm else self.norm2(hidden_states )
            )
            attn_output = self.attn2(
                norm_hidden_states , encoder_hidden_states=encoder_hidden_states , attention_mask=encoder_attention_mask , **cross_attention_kwargs , )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states )
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    F"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice ) for hid_slice in norm_hidden_states.chunk(num_chunks , dim=self._chunk_dim )] , dim=self._chunk_dim , )
        else:
            ff_output = self.ff(norm_hidden_states )
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1 ) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
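

# --- Added usage sketch (not part of the original file) ---
# The block above is diffusers' BasicTransformerBlock; the self-attention ->
# cross-attention -> feed-forward pipeline can be exercised like so:
#
#   block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40,
#                                 cross_attention_dim=768)
#   hidden_states = torch.randn(1, 64, 320)
#   encoder_hidden_states = torch.randn(1, 77, 768)
#   out = block(hidden_states, encoder_hidden_states=encoder_hidden_states)  # (1, 64, 320)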
class FeedForward(nn.Module ):
    """simple docstring"""
    def __init__( self , dim , dim_out = None , mult = 4 , dropout = 0.0 , activation_fn = "geglu" , final_dropout = False , ):
        super().__init__()
        inner_dim = int(dim * mult )
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim , inner_dim )
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim , inner_dim , approximate="tanh" )
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim , inner_dim )
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim , inner_dim )
        self.net = nn.ModuleList([] )
        # project in
        self.net.append(act_fn )
        # project dropout
        self.net.append(nn.Dropout(dropout ) )
        # project out
        self.net.append(nn.Linear(inner_dim , dim_out ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout ) )
    def forward(self , hidden_states ):
        for module in self.net:
            hidden_states = module(hidden_states )
        return hidden_states
class GELU(nn.Module ):
    """simple docstring"""
    def __init__( self , dim_in , dim_out , approximate = "none" ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
        self.approximate = approximate
    def gelu(self , gate ):
        if gate.device.type != "mps":
            return F.gelu(gate , approximate=self.approximate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
    def forward(self , hidden_states ):
        hidden_states = self.proj(hidden_states )
        hidden_states = self.gelu(hidden_states )
        return hidden_states
class GEGLU(nn.Module ):
    """simple docstring"""
    def __init__( self , dim_in , dim_out ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out * 2 )
    def gelu(self , gate ):
        if gate.device.type != "mps":
            return F.gelu(gate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
    def forward(self , hidden_states ):
        hidden_states , gate = self.proj(hidden_states ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(gate )
class ApproximateGELU(nn.Module ):
    """simple docstring"""
    def __init__( self , dim_in , dim_out ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
    def forward(self , x ):
        x = self.proj(x )
        return x * torch.sigmoid(1.702 * x )
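

# --- Added numerical note (not part of the original file) ---
# The 1.702 constant approximates GELU as x * sigmoid(1.702 * x); a quick check
# against the exact formulation:
#
#   x = torch.linspace(-3.0, 3.0, steps=61)
#   approx = x * torch.sigmoid(1.702 * x)
#   exact = F.gelu(x)
#   print((approx - exact).abs().max())  # on the order of 1e-2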
class AdaLayerNorm(nn.Module ):
    """simple docstring"""
    def __init__( self , embedding_dim , num_embeddings ):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , embedding_dim * 2 )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False )
    def forward(self , x , timestep ):
        emb = self.linear(self.silu(self.emb(timestep ) ) )
        scale , shift = torch.chunk(emb , 2 )
        x = self.norm(x ) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module ):
    """simple docstring"""
    def __init__( self , embedding_dim , num_embeddings ):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , 6 * embedding_dim , bias=True )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False , eps=1E-6 )
    def forward(self , x , timestep , class_labels , hidden_dtype=None ):
        emb = self.linear(self.silu(self.emb(timestep , class_labels , hidden_dtype=hidden_dtype ) ) )
        shift_msa , scale_msa , gate_msa , shift_mlp , scale_mlp , gate_mlp = emb.chunk(6 , dim=1 )
        x = self.norm(x ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module ):
    """simple docstring"""
    def __init__( self , embedding_dim , out_dim , num_groups , act_fn = None , eps = 1E-5 ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn )
        self.linear = nn.Linear(embedding_dim , out_dim * 2 )
    def forward(self , x , emb ):
        if self.act:
            emb = self.act(emb )
        emb = self.linear(emb )
        emb = emb[:, :, None, None]
        scale , shift = emb.chunk(2 , dim=1 )
        x = F.group_norm(x , self.num_groups , eps=self.eps )
        x = x * (1 + scale) + shift
        return x
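

# --- Added usage sketch (not part of the original file) ---
# AdaGroupNorm modulates a group-normalized feature map with a scale/shift pair
# predicted from a conditioning embedding:
#
#   norm = AdaGroupNorm(embedding_dim=128, out_dim=64, num_groups=8, act_fn="silu")
#   x = torch.randn(2, 64, 16, 16)
#   emb = torch.randn(2, 128)
#   y = norm(x, emb)  # same shape as x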
| 408 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class DPRConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = "dpr"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim = 0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
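

# --- Added usage sketch (not part of the original file) ---
# Instantiating the config with a non-zero projection head, as DPR checkpoints with a
# reduced embedding dimension do:
#
#   config = DPRConfig(projection_dim=128)
#   assert (config.hidden_size, config.projection_dim) == (768, 128)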
| 278 | 0 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object ):
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , q_groups=2 , k_groups=2 , v_groups=2 , post_attention_groups=2 , intermediate_groups=4 , output_groups=1 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_squeezebert_for_masked_lm(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_squeezebert_for_question_answering(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_squeezebert_for_sequence_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_squeezebert_for_token_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_squeezebert_for_multiple_choice(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37 )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_squeezebert_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )
    def test_for_masked_lm(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_classification_head(self ):
        '''simple docstring'''
        model = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
        input_ids = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape , expected_shape )
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]] )
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1E-4 ) )
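

# --- Added note (not part of the original test file) ---
# Assuming the usual transformers layout (tests/models/squeezebert/test_modeling_squeezebert.py),
# the suite above can be run with, e.g.:
#
#   python -m pytest tests/models/squeezebert/test_modeling_squeezebert.py -q
#   RUN_SLOW=1 python -m pytest tests/models/squeezebert -k IntegrationTest  # includes the @slow check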
| 715 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs = None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else '''eng_Latn'''
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__(self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _a (self ) -> Tuple:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _a (self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _a (self , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
UpperCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCamelCase__ = src_lang
UpperCamelCase__ = self(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tgt_lang_id
return inputs
def _a (self ) -> str:
'''simple docstring'''
UpperCamelCase__ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a (self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase__ = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _a (self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _a (self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = ''''''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_ , ''' ''' ).strip()
return out_string
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "eng_Latn" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "fra_Latn" , **SCREAMING_SNAKE_CASE_ , ) -> BatchEncoding:
'''simple docstring'''
UpperCamelCase__ = src_lang
UpperCamelCase__ = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _a (self ) -> Union[str, Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _a (self ) -> Dict:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self , src_lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self , lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
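

# --- Added usage sketch (not part of the original file) ---
# Typical translation usage of the tokenizer above (NllbTokenizer):
#
#   tokenizer = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   # with the default (non-legacy) behaviour, source sequences are built as
#   # [src_lang_code] ... [eos], per set_src_lang_special_tokens above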
| 469 | 0 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 575 |
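The block above is accelerate's utility re-export surface, including many `is_*_available` probes. As a rough sketch of how such probes are commonly written (an illustration, not accelerate's actual implementation), a cached `importlib` lookup suffices:

```python
import importlib.util
from functools import lru_cache


@lru_cache(maxsize=None)
def is_package_available(name: str) -> bool:
    """True if `name` is importable, without actually importing it."""
    return importlib.util.find_spec(name) is not None


def is_wandb_available() -> bool:  # illustrative wrapper mirroring the naming above
    return is_package_available("wandb")


print(is_package_available("json"))    # True: stdlib module
print(is_package_available("nope__"))  # False: not installed
```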
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule( FlaxBigBirdForQuestionAnsweringModule ):
"""simple docstring"""
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
def snake_case ( self : Optional[Any] ):
super().setup()
        self.cls = nn.Dense(5 , dtype=self.dtype )
def __call__( self : Optional[Any] , *lowercase__ : Any , **lowercase__ : Optional[Any] ):
        outputs = super().__call__(*lowercase__ , **lowercase__ )
        cls_out = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions( FlaxBigBirdForQuestionAnswering ):
    """simple docstring"""
    module_class = FlaxBigBirdForNaturalQuestionsModule
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->Union[str, Any]:
"""simple docstring"""
def cross_entropy(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase=None ):
__lowercase : Union[str, Any] = logits.shape[-1]
__lowercase : List[str] = (labels[..., None] == jnp.arange(_lowerCamelCase )[None]).astype("f4" )
__lowercase : int = jax.nn.log_softmax(_lowerCamelCase, axis=-1 )
__lowercase : int = -jnp.sum(labels * logits, axis=-1 )
if reduction is not None:
__lowercase : List[str] = reduction(_lowerCamelCase )
return loss
__lowercase : Union[str, Any] = partial(_lowerCamelCase, reduction=jnp.mean )
__lowercase : Dict = cross_entropy(_lowerCamelCase, _lowerCamelCase )
__lowercase : int = cross_entropy(_lowerCamelCase, _lowerCamelCase )
__lowercase : Union[str, Any] = cross_entropy(_lowerCamelCase, _lowerCamelCase )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : str = "google/bigbird-roberta-base"
__UpperCAmelCase : int = 3000
__UpperCAmelCase : int = 10500
__UpperCAmelCase : int = 128
__UpperCAmelCase : int = 3
__UpperCAmelCase : int = 1
__UpperCAmelCase : int = 5
# tx_args
__UpperCAmelCase : float = 3E-5
__UpperCAmelCase : float = 0.0
__UpperCAmelCase : int = 20000
__UpperCAmelCase : float = 0.00_95
__UpperCAmelCase : str = "bigbird-roberta-natural-questions"
__UpperCAmelCase : str = "training-expt"
__UpperCAmelCase : str = "data/nq-training.jsonl"
__UpperCAmelCase : str = "data/nq-validation.jsonl"
def snake_case ( self : Tuple ):
os.makedirs(self.base_dir , exist_ok=lowercase__ )
__lowercase : int = os.path.join(self.base_dir , self.save_dir )
__lowercase : Tuple = self.batch_size_per_device * jax.device_count()
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : int
__UpperCAmelCase : int = 4096 # no dynamic padding on TPUs
def __call__( self : Dict , lowercase__ : Tuple ):
__lowercase : List[Any] = self.collate_fn(lowercase__ )
__lowercase : Dict = jax.tree_util.tree_map(lowercase__ , lowercase__ )
return batch
def snake_case ( self : str , lowercase__ : Union[str, Any] ):
__lowercase ,__lowercase : List[Any] = self.fetch_inputs(features["input_ids"] )
__lowercase : int = {
"input_ids": jnp.array(lowercase__ , dtype=jnp.intaa ),
"attention_mask": jnp.array(lowercase__ , dtype=jnp.intaa ),
"start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ),
"end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ),
"pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ),
}
return batch
def snake_case ( self : List[Any] , lowercase__ : list ):
__lowercase : str = [self._fetch_inputs(lowercase__ ) for ids in input_ids]
return zip(*lowercase__ )
def snake_case ( self : Any , lowercase__ : list ):
__lowercase : Optional[Any] = [1 for _ in range(len(lowercase__ ) )]
while len(lowercase__ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase=None ) ->Any:
"""simple docstring"""
if seed is not None:
__lowercase : Optional[Any] = dataset.shuffle(seed=_lowerCamelCase )
for i in range(len(_lowerCamelCase ) // batch_size ):
__lowercase : int = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(_lowerCamelCase )
@partial(jax.pmap, axis_name="batch" )
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, **_lowerCamelCase ) ->Any:
"""simple docstring"""
def loss_fn(_lowerCamelCase ):
__lowercase : Dict = model_inputs.pop("start_labels" )
__lowercase : str = model_inputs.pop("end_labels" )
__lowercase : Union[str, Any] = model_inputs.pop("pooled_labels" )
__lowercase : List[str] = state.apply_fn(**_lowerCamelCase, params=_lowerCamelCase, dropout_rng=_lowerCamelCase, train=_lowerCamelCase )
__lowercase ,__lowercase ,__lowercase : List[str] = outputs
return state.loss_fn(
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, )
__lowercase ,__lowercase : Any = jax.random.split(_lowerCamelCase )
__lowercase : Dict = jax.value_and_grad(_lowerCamelCase )
__lowercase ,__lowercase : Tuple = grad_fn(state.params )
__lowercase : str = jax.lax.pmean({"loss": loss}, axis_name="batch" )
__lowercase : str = jax.lax.pmean(_lowerCamelCase, "batch" )
__lowercase : Any = state.apply_gradients(grads=_lowerCamelCase )
return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch" )
def snake_case__ ( _lowerCamelCase, **_lowerCamelCase ) ->Optional[int]:
"""simple docstring"""
__lowercase : Optional[int] = model_inputs.pop("start_labels" )
__lowercase : Optional[int] = model_inputs.pop("end_labels" )
__lowercase : Optional[Any] = model_inputs.pop("pooled_labels" )
__lowercase : Optional[int] = state.apply_fn(**_lowerCamelCase, params=state.params, train=_lowerCamelCase )
__lowercase ,__lowercase ,__lowercase : int = outputs
__lowercase : int = state.loss_fn(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
__lowercase : str = jax.lax.pmean({"loss": loss}, axis_name="batch" )
return metrics
class lowerCAmelCase__ ( train_state.TrainState ):
"""simple docstring"""
__UpperCAmelCase : Callable = struct.field(pytree_node=lowerCAmelCase_ )
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Args
__UpperCAmelCase : Callable
__UpperCAmelCase : Callable
__UpperCAmelCase : Callable
__UpperCAmelCase : Callable
__UpperCAmelCase : wandb
__UpperCAmelCase : Callable = None
def snake_case ( self : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any]=None ):
__lowercase : Optional[Any] = model.params
__lowercase : Union[str, Any] = TrainState.create(
apply_fn=model.__call__ , params=lowercase__ , tx=lowercase__ , loss_fn=lowercase__ , )
if ckpt_dir is not None:
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase : List[str] = restore_checkpoint(lowercase__ , lowercase__ )
__lowercase : Optional[Any] = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
__lowercase ,__lowercase : Any = build_tx(**lowercase__ )
__lowercase : Any = train_state.TrainState(
step=lowercase__ , apply_fn=model.__call__ , params=lowercase__ , tx=lowercase__ , opt_state=lowercase__ , )
__lowercase : List[Any] = args
__lowercase : List[str] = data_collator
__lowercase : Dict = lr
__lowercase : List[str] = params
__lowercase : str = jax_utils.replicate(lowercase__ )
return state
def snake_case ( self : Optional[Any] , lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : Any ):
__lowercase : Tuple = self.args
__lowercase : Dict = len(lowercase__ ) // args.batch_size
__lowercase : Dict = jax.random.PRNGKey(0 )
__lowercase : Optional[Any] = jax.random.split(lowercase__ , jax.device_count() )
for epoch in range(args.max_epochs ):
__lowercase : Any = jnp.array(0 , dtype=jnp.floataa )
__lowercase : Union[str, Any] = get_batched_dataset(lowercase__ , args.batch_size , seed=lowercase__ )
__lowercase : Optional[int] = 0
for batch in tqdm(lowercase__ , total=lowercase__ , desc=f'Running EPOCH-{epoch}' ):
__lowercase : Any = self.data_collator(lowercase__ )
__lowercase ,__lowercase ,__lowercase : int = self.train_step_fn(lowercase__ , lowercase__ , **lowercase__ )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
__lowercase : Union[str, Any] = jax_utils.unreplicate(state.step )
__lowercase : Tuple = running_loss.item() / i
__lowercase : Tuple = self.scheduler_fn(state_step - 1 )
__lowercase : Optional[int] = self.evaluate(lowercase__ , lowercase__ )
__lowercase : Union[str, Any] = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(lowercase__ ) )
self.logger.log(lowercase__ , commit=lowercase__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=lowercase__ )
def snake_case ( self : int , lowercase__ : List[Any] , lowercase__ : Union[str, Any] ):
__lowercase : int = get_batched_dataset(lowercase__ , self.args.batch_size )
__lowercase : List[Any] = len(lowercase__ ) // self.args.batch_size
__lowercase : List[str] = jnp.array(0 , dtype=jnp.floataa )
__lowercase : Any = 0
for batch in tqdm(lowercase__ , total=lowercase__ , desc="Evaluating ... " ):
__lowercase : Optional[Any] = self.data_collator(lowercase__ )
__lowercase : Dict = self.val_step_fn(lowercase__ , **lowercase__ )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def snake_case ( self : Dict , lowercase__ : Union[str, Any] , lowercase__ : Any ):
__lowercase : int = jax_utils.unreplicate(lowercase__ )
print(f'SAVING CHECKPOINT IN {save_dir}' , end=" ... " )
self.model_save_fn(lowercase__ , params=state.params )
with open(os.path.join(lowercase__ , "opt_state.msgpack" ) , "wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(lowercase__ , "args.joblib" ) )
joblib.dump(self.data_collator , os.path.join(lowercase__ , "data_collator.joblib" ) )
with open(os.path.join(lowercase__ , "training_state.json" ) , "w" ) as f:
json.dump({"step": state.step.item()} , lowercase__ )
print("DONE" )
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->str:
"""simple docstring"""
print(F'RESTORING CHECKPOINT FROM {save_dir}', end=" ... " )
with open(os.path.join(_lowerCamelCase, "flax_model.msgpack" ), "rb" ) as f:
__lowercase : Union[str, Any] = from_bytes(state.params, f.read() )
with open(os.path.join(_lowerCamelCase, "opt_state.msgpack" ), "rb" ) as f:
__lowercase : Dict = from_bytes(state.opt_state, f.read() )
__lowercase : int = joblib.load(os.path.join(_lowerCamelCase, "args.joblib" ) )
__lowercase : Any = joblib.load(os.path.join(_lowerCamelCase, "data_collator.joblib" ) )
with open(os.path.join(_lowerCamelCase, "training_state.json" ), "r" ) as f:
__lowercase : int = json.load(_lowerCamelCase )
__lowercase : List[str] = training_state["step"]
print("DONE" )
return params, opt_state, step, args, data_collator
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = num_train_steps - warmup_steps
__lowercase : Dict = optax.linear_schedule(init_value=_lowerCamelCase, end_value=_lowerCamelCase, transition_steps=_lowerCamelCase )
__lowercase : Optional[Any] = optax.linear_schedule(init_value=_lowerCamelCase, end_value=1E-7, transition_steps=_lowerCamelCase )
__lowercase : Dict = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps] )
return lr
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->Union[str, Any]:
"""simple docstring"""
def weight_decay_mask(_lowerCamelCase ):
__lowercase : Tuple = traverse_util.flatten_dict(_lowerCamelCase )
__lowercase : int = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
return traverse_util.unflatten_dict(_lowerCamelCase )
__lowercase : Tuple = scheduler_fn(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
__lowercase : List[Any] = optax.adamw(learning_rate=_lowerCamelCase, weight_decay=_lowerCamelCase, mask=_lowerCamelCase )
return tx, lr
| 575 | 1 |
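The training script above builds its three-way question-answering loss from a cross-entropy that one-hot encodes labels against log-softmax logits. A minimal NumPy sketch of that per-example computation (illustrative, no JAX required):

```python
import numpy as np


def cross_entropy(logits: np.ndarray, labels: np.ndarray) -> float:
    """Mean cross-entropy via one-hot labels, mirroring the loss above."""
    vocab = logits.shape[-1]
    one_hot = (labels[..., None] == np.arange(vocab)[None]).astype("f4")
    # numerically plain log-softmax; fine for small toy logits
    log_probs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
    return float(-(one_hot * log_probs).sum(axis=-1).mean())


logits = np.array([[2.0, 0.5, 0.1], [0.2, 1.5, 0.3]])
labels = np.array([0, 1])
print(cross_entropy(logits, labels))  # small positive scalar
```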
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self: int , a: Tuple , a: Union[str, Any]=100 , a: Dict=13 , a: str=30 , a: Optional[int]=2 , a: List[str]=3 , a: int=True , a: Optional[Any]=True , a: List[str]=32 , a: List[str]=5 , a: Union[str, Any]=4 , a: Dict=37 , a: Union[str, Any]="gelu" , a: Tuple=0.1 , a: str=0.1 , a: Union[str, Any]=10 , a: Union[str, Any]=0.0_2 , a: str=3 , ):
__lowerCamelCase : int = parent
__lowerCamelCase : int = vocab_size
__lowerCamelCase : Union[str, Any] = batch_size
__lowerCamelCase : int = image_size
__lowerCamelCase : Optional[int] = patch_size
__lowerCamelCase : List[Any] = num_channels
__lowerCamelCase : Any = is_training
__lowerCamelCase : List[Any] = use_labels
__lowerCamelCase : Any = hidden_size
__lowerCamelCase : Optional[Any] = num_hidden_layers
__lowerCamelCase : List[str] = num_attention_heads
__lowerCamelCase : int = intermediate_size
__lowerCamelCase : int = hidden_act
__lowerCamelCase : List[Any] = hidden_dropout_prob
__lowerCamelCase : Any = attention_probs_dropout_prob
__lowerCamelCase : Dict = type_sequence_label_size
__lowerCamelCase : List[str] = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : Union[str, Any] = (image_size // patch_size) ** 2
__lowerCamelCase : str = num_patches + 1
def _snake_case ( self: Dict ):
__lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : List[Any] = None
if self.use_labels:
__lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : List[Any] = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def _snake_case ( self: int , a: Optional[Any] , a: Dict , a: List[Any] ):
__lowerCamelCase : Union[str, Any] = FlaxBeitModel(config=a )
__lowerCamelCase : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self: List[str] , a: Optional[Any] , a: Optional[Any] , a: List[str] ):
__lowerCamelCase : Optional[Any] = FlaxBeitForMaskedImageModeling(config=a )
__lowerCamelCase : Dict = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _snake_case ( self: Optional[Any] , a: Union[str, Any] , a: Union[str, Any] , a: str ):
__lowerCamelCase : Any = self.type_sequence_label_size
__lowerCamelCase : Tuple = FlaxBeitForImageClassification(config=a )
__lowerCamelCase : List[Any] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : List[str] = FlaxBeitForImageClassification(a )
__lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase : List[Any] = model(a )
def _snake_case ( self: Optional[int] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class FlaxBeitModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : Optional[Any] = FlaxBeitModelTester(self )
__lowerCamelCase : Tuple = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def _snake_case ( self: Any ):
self.config_tester.run_common_tests()
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Tuple = model_class(a )
__lowerCamelCase : Dict = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Any = [*signature.parameters.keys()]
__lowerCamelCase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _snake_case ( self: int ):
__lowerCamelCase , __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase : str = self._prepare_for_class(a , a )
__lowerCamelCase : str = model_class(a )
@jax.jit
def model_jitted(a: Union[str, Any] , **a: Optional[int] ):
return model(pixel_values=a , **a )
with self.subTest('JIT Enabled' ):
__lowerCamelCase : Optional[int] = model_jitted(**a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCamelCase : Tuple = model_jitted(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) )
for jitted_output, output in zip(a , a ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self: Any ):
__lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _snake_case ( self: List[Any] ):
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a )
def _snake_case ( self: Any ):
__lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def _snake_case ( self: List[Any] ):
for model_class_name in self.all_model_classes:
__lowerCamelCase : int = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
__lowerCamelCase : Any = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(a )
def UpperCamelCase__ ( ):
__lowerCamelCase : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@require_flax
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self: Optional[Any] ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def _snake_case ( self: List[str] ):
__lowerCamelCase : List[Any] = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
__lowerCamelCase : Optional[Any] = self.default_image_processor
__lowerCamelCase : str = prepare_img()
__lowerCamelCase : Dict = image_processor(images=a , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
__lowerCamelCase : Dict = np.ones((1, 196) , dtype=a )
# forward pass
__lowerCamelCase : str = model(pixel_values=a , bool_masked_pos=a )
__lowerCamelCase : List[Any] = outputs.logits
# verify the logits
__lowerCamelCase : Tuple = (1, 196, 8192)
self.assertEqual(logits.shape , a )
__lowerCamelCase : Optional[Any] = np.array(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , a , atol=1e-2 ) )
@slow
def _snake_case ( self: Any ):
__lowerCamelCase : Any = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
__lowerCamelCase : List[str] = self.default_image_processor
__lowerCamelCase : Tuple = prepare_img()
__lowerCamelCase : Tuple = image_processor(images=a , return_tensors='np' )
# forward pass
__lowerCamelCase : str = model(**a )
__lowerCamelCase : Tuple = outputs.logits
# verify the logits
__lowerCamelCase : Any = (1, 1000)
self.assertEqual(logits.shape , a )
__lowerCamelCase : List[str] = np.array([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] )
self.assertTrue(np.allclose(logits[0, :3] , a , atol=1e-4 ) )
__lowerCamelCase : Optional[int] = 281
self.assertEqual(logits.argmax(-1 ).item() , a )
@slow
def _snake_case ( self: str ):
__lowerCamelCase : Optional[Any] = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
__lowerCamelCase : Optional[Any] = self.default_image_processor
__lowerCamelCase : Tuple = prepare_img()
__lowerCamelCase : str = image_processor(images=a , return_tensors='np' )
# forward pass
__lowerCamelCase : List[Any] = model(**a )
__lowerCamelCase : Dict = outputs.logits
# verify the logits
__lowerCamelCase : Dict = (1, 2_1841)
self.assertEqual(logits.shape , a )
__lowerCamelCase : List[Any] = np.array([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] )
self.assertTrue(np.allclose(logits[0, :3] , a , atol=1e-4 ) )
__lowerCamelCase : List[Any] = 2396
self.assertEqual(logits.argmax(-1 ).item() , a )
| 230 |
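The JIT test above runs the same call with compilation enabled and disabled and asserts the outputs agree. A self-contained sketch of that pattern (requires `jax`; the toy model below is invented):

```python
import jax
import jax.numpy as jnp


def model(x):
    return jnp.tanh(x) * 2.0


jitted = jax.jit(model)
x = jnp.ones((2, 3))

out_jit = jitted(x)
with jax.disable_jit():   # same code, executed eagerly op by op
    out_eager = jitted(x)

assert out_jit.shape == out_eager.shape
print(bool(jnp.allclose(out_jit, out_eager)))  # True
```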
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = """naver-clova-ix/donut-base-finetuned-docvqa"""
    description = (
        """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
        """should be the document containing the information, as well as a `question` that is the question about the """
        """document. It returns a text that contains the answer to the question."""
    )
    name = """document_qa"""
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["""image""", """text"""]
    outputs = ["""text"""]
def __init__( self: Dict , *a: List[Any] , **a: List[Any] ):
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*a , **a )
def _snake_case ( self: str , a: "Image" , a: str ):
__lowerCamelCase : str = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
__lowerCamelCase : Dict = task_prompt.replace('{user_input}' , a )
__lowerCamelCase : Optional[Any] = self.pre_processor.tokenizer(
a , add_special_tokens=a , return_tensors='pt' ).input_ids
__lowerCamelCase : Union[str, Any] = self.pre_processor(a , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _snake_case ( self: Optional[Any] , a: Tuple ):
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=a , ).sequences
def _snake_case ( self: Optional[Any] , a: Any ):
        sequence = self.pre_processor.batch_decode(a )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
        sequence = re.sub(R'<.*?>' , '' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
| 230 | 1 |
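The tool above decodes the generated ids, strips eos/pad tokens plus the first task-start tag, and parses the answer out of the remaining markup. A toy illustration of that post-processing on a hand-written Donut-style string (the tags and answer are invented):

```python
import re

sequence = "<s_docvqa><s_question>what is the date?</s_question><s_answer>2013-03-11</s_answer></s>"

sequence = sequence.replace("</s>", "")              # strip eos, as in decode() above
sequence = re.sub(r"<.*?>", "", sequence, count=1)   # drop the first task-start token
match = re.search(r"<s_answer>(.*?)</s_answer>", sequence)
print(match.group(1).strip() if match else None)     # 2013-03-11
```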
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 593 |
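This module exists only so that old `from transformers.file_utils import ...` statements keep resolving after the names moved to `.utils`. A minimal sketch of that kind of backward-compatibility shim (not transformers' actual mechanism; here `json` stands in for the new home of the relocated names):

```python
# compat_shim.py -- save as a module; old imports keep working with a warning.
import json as _new_home
import warnings


def __getattr__(name):  # PEP 562: called when normal module lookup fails
    if hasattr(_new_home, name):
        warnings.warn(f"{name} has moved; update your import", FutureWarning)
        return getattr(_new_home, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

With the file above on the path, `from compat_shim import dumps` still works and emits a `FutureWarning`.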
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
A : Optional[Any] = logging.get_logger(__name__)
A : Optional[Any] = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig( PretrainedConfig ):
    model_type = 'perceiver'
def __init__( self : List[str], _snake_case : Optional[Any]=256, _snake_case : int=1_280, _snake_case : Optional[int]=768, _snake_case : List[str]=1, _snake_case : str=26, _snake_case : Union[str, Any]=8, _snake_case : Optional[int]=8, _snake_case : Optional[int]=None, _snake_case : str=None, _snake_case : List[str]="kv", _snake_case : str=1, _snake_case : Optional[Any]=1, _snake_case : str="gelu", _snake_case : List[Any]=0.1, _snake_case : Any=0.02, _snake_case : Union[str, Any]=1E-12, _snake_case : str=True, _snake_case : Any=262, _snake_case : Union[str, Any]=2_048, _snake_case : List[str]=56, _snake_case : Tuple=[368, 496], _snake_case : Dict=16, _snake_case : Tuple=1_920, _snake_case : Optional[Any]=16, _snake_case : Optional[Any]=[1, 16, 224, 224], **_snake_case : Optional[Any], ):
'''simple docstring'''
super().__init__(**_snake_case )
snake_case : Union[str, Any] =num_latents
snake_case : str =d_latents
snake_case : Any =d_model
snake_case : Any =num_blocks
snake_case : Tuple =num_self_attends_per_block
snake_case : int =num_self_attention_heads
snake_case : str =num_cross_attention_heads
snake_case : List[Any] =qk_channels
snake_case : Tuple =v_channels
snake_case : str =cross_attention_shape_for_attention
snake_case : Union[str, Any] =self_attention_widening_factor
snake_case : Union[str, Any] =cross_attention_widening_factor
snake_case : Optional[int] =hidden_act
snake_case : Any =attention_probs_dropout_prob
snake_case : int =initializer_range
snake_case : str =layer_norm_eps
snake_case : Dict =use_query_residual
# masked language modeling attributes
snake_case : List[Any] =vocab_size
snake_case : List[Any] =max_position_embeddings
# image classification attributes
snake_case : List[str] =image_size
# flow attributes
snake_case : Optional[Any] =train_size
# multimodal autoencoding attributes
snake_case : Dict =num_frames
snake_case : Optional[Any] =audio_samples_per_frame
snake_case : Dict =samples_per_patch
snake_case : Union[str, Any] =output_shape
class PerceiverOnnxConfig( OnnxConfig ):
@property
def __snake_case ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : Tuple ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
snake_case : Union[str, Any] ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def __snake_case ( self : List[str] ):
'''simple docstring'''
return 1E-4
def __snake_case ( self : Dict, _snake_case : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], _snake_case : int = -1, _snake_case : int = -1, _snake_case : int = -1, _snake_case : bool = False, _snake_case : Optional[TensorType] = None, _snake_case : int = 3, _snake_case : int = 40, _snake_case : int = 40, ):
'''simple docstring'''
if isinstance(_snake_case, _snake_case ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case : Any =compute_effective_axis_dimension(
_snake_case, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case : Tuple =preprocessor.num_special_tokens_to_add(_snake_case )
snake_case : Optional[Any] =compute_effective_axis_dimension(
_snake_case, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=_snake_case )
# Generate dummy inputs according to compute batch and sequence
snake_case : str =[''' '''.join(['''a'''] ) * seq_length] * batch_size
snake_case : int =dict(preprocessor(_snake_case, return_tensors=_snake_case ) )
snake_case : List[str] =inputs.pop('''input_ids''' )
return inputs
elif isinstance(_snake_case, _snake_case ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case : Union[str, Any] =compute_effective_axis_dimension(_snake_case, fixed_dimension=OnnxConfig.default_fixed_batch )
snake_case : Dict =self._generate_dummy_images(_snake_case, _snake_case, _snake_case, _snake_case )
snake_case : Optional[Any] =dict(preprocessor(images=_snake_case, return_tensors=_snake_case ) )
snake_case : Optional[Any] =inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
| 349 | 0 |
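The ONNX config above resolves dynamic axes (marked -1) to fixed defaults before generating dummy inputs, then subtracts room for special tokens. A small sketch of that helper, approximating the behavior of the transformers utility used here:

```python
def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # dynamic axes are encoded as -1 (or 0): substitute a fixed default,
    # then leave room for special tokens the tokenizer will add
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add


print(compute_effective_axis_dimension(-1, fixed_dimension=2))                      # 2
print(compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2))  # 6
print(compute_effective_axis_dimension(16, fixed_dimension=8))                      # 16
```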
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    # branch 1: skip the element at `index`
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: include it, recurse, then backtrack
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)
    seq.clear()
    seq.extend(['A', 'B', 'C'])
    generate_all_subsequences(seq) | 721 |
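For comparison, the same power set can be enumerated iteratively with a bitmask over the element indices; this alternative sketch is not part of the original snippet:

```python
def all_subsequences(seq: list) -> list[list]:
    # bit i of `mask` decides whether seq[i] joins the subsequence
    n = len(seq)
    return [[seq[i] for i in range(n) if mask >> i & 1] for mask in range(1 << n)]


print(all_subsequences([3, 1, 2]))  # 8 subsequences, from [] to [3, 1, 2]
```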
def gcd(a: int, b: int) -> int:
    # iterative Euclidean algorithm
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"""mod inverse of {a!r} and {m!r} does not exist"""
        raise ValueError(msg)
    # extended Euclidean algorithm, tracking Bezout coefficients
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m | 381 | 0 |
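A quick sanity check of the routine above (using the function names as restored): 7 * 15 = 105 = 4 * 26 + 1, so 15 is the inverse of 7 modulo 26, the classic affine-cipher case:

```python
print(find_mod_inverse(7, 26))           # 15
print(7 * find_mod_inverse(7, 26) % 26)  # 1
print(gcd(12, 18))                       # 6
```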
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig( PretrainedConfig ):
    model_type = """xglm"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """num_layers""",
    }
def __init__( self , _SCREAMING_SNAKE_CASE=256008 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE=24 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = d_model
UpperCamelCase = ffn_dim
UpperCamelCase = num_layers
UpperCamelCase = attention_heads
UpperCamelCase = activation_function
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = layerdrop
UpperCamelCase = init_std
UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCamelCase = use_cache
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
| 301 |
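The config above maps standard attribute names (`hidden_size`, `num_hidden_layers`) onto model-specific fields (`d_model`, `num_layers`) through `attribute_map`. A minimal toy sketch of that aliasing, independent of `PretrainedConfig`:

```python
class ToyConfig:
    attribute_map = {"hidden_size": "d_model", "num_hidden_layers": "num_layers"}

    def __init__(self, d_model: int = 1024, num_layers: int = 24):
        self.d_model = d_model
        self.num_layers = num_layers

    def __getattr__(self, name):
        # only called when normal lookup fails, so real fields win
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)


cfg = ToyConfig()
print(cfg.hidden_size, cfg.num_hidden_layers)  # 1024 24
```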
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
SCREAMING_SNAKE_CASE__ = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , )-> Optional[int]:
output_path.parent.mkdir(parents=__UpperCamelCase , exist_ok=__UpperCamelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__UpperCamelCase , __UpperCamelCase , f=output_path.as_posix() , input_names=__UpperCamelCase , output_names=__UpperCamelCase , dynamic_axes=__UpperCamelCase , do_constant_folding=__UpperCamelCase , use_external_data_format=__UpperCamelCase , enable_onnx_checker=__UpperCamelCase , opset_version=__UpperCamelCase , )
else:
export(
__UpperCamelCase , __UpperCamelCase , f=output_path.as_posix() , input_names=__UpperCamelCase , output_names=__UpperCamelCase , dynamic_axes=__UpperCamelCase , do_constant_folding=__UpperCamelCase , opset_version=__UpperCamelCase , )
@torch.no_grad()
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False )-> Optional[Any]:
UpperCamelCase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
UpperCamelCase = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
UpperCamelCase = """cpu"""
UpperCamelCase = StableDiffusionPipeline.from_pretrained(__UpperCamelCase , torch_dtype=__UpperCamelCase ).to(__UpperCamelCase )
UpperCamelCase = Path(__UpperCamelCase )
# TEXT ENCODER
UpperCamelCase = pipeline.text_encoder.config.max_position_embeddings
UpperCamelCase = pipeline.text_encoder.config.hidden_size
UpperCamelCase = pipeline.tokenizer(
"""A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors="""pt""" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=__UpperCamelCase , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """sequence"""},
} , opset=__UpperCamelCase , )
del pipeline.text_encoder
# UNET
UpperCamelCase = pipeline.unet.config.in_channels
UpperCamelCase = pipeline.unet.config.sample_size
UpperCamelCase = output_path / """unet""" / """model.onnx"""
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
torch.randn(2 ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
torch.randn(2 , __UpperCamelCase , __UpperCamelCase ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
False,
) , output_path=__UpperCamelCase , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""timestep""": {0: """batch"""},
"""encoder_hidden_states""": {0: """batch""", 1: """sequence"""},
} , opset=__UpperCamelCase , use_external_data_format=__UpperCamelCase , )
UpperCamelCase = str(unet_path.absolute().as_posix() )
UpperCamelCase = os.path.dirname(__UpperCamelCase )
UpperCamelCase = onnx.load(__UpperCamelCase )
# clean up existing tensor files
shutil.rmtree(__UpperCamelCase )
os.mkdir(__UpperCamelCase )
# collate external tensor files into one
onnx.save_model(
__UpperCamelCase , __UpperCamelCase , save_as_external_data=__UpperCamelCase , all_tensors_to_one_file=__UpperCamelCase , location="""weights.pb""" , convert_attribute=__UpperCamelCase , )
del pipeline.unet
# VAE ENCODER
UpperCamelCase = pipeline.vae
UpperCamelCase = vae_encoder.config.in_channels
UpperCamelCase = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
UpperCamelCase = lambda __UpperCamelCase , __UpperCamelCase : vae_encoder.encode(__UpperCamelCase , __UpperCamelCase )[0].sample()
onnx_export(
__UpperCamelCase , model_args=(
torch.randn(1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
False,
) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=__UpperCamelCase , )
# VAE DECODER
UpperCamelCase = pipeline.vae
UpperCamelCase = vae_decoder.config.latent_channels
UpperCamelCase = vae_decoder.config.out_channels
# forward only through the decoder part
UpperCamelCase = vae_encoder.decode
onnx_export(
__UpperCamelCase , model_args=(
torch.randn(1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=__UpperCamelCase , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
UpperCamelCase = pipeline.safety_checker
UpperCamelCase = safety_checker.config.vision_config.num_channels
UpperCamelCase = safety_checker.config.vision_config.image_size
UpperCamelCase = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
torch.randn(1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={
"""clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""},
} , opset=__UpperCamelCase , )
del pipeline.safety_checker
UpperCamelCase = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" )
UpperCamelCase = pipeline.feature_extractor
else:
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(__UpperCamelCase )
print("""ONNX pipeline saved to""" , __UpperCamelCase )
del pipeline
del onnx_pipeline
UpperCamelCase = OnnxStableDiffusionPipeline.from_pretrained(__UpperCamelCase , provider="""CPUExecutionProvider""" )
print("""ONNX pipeline is loadable""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=1_4,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 301 | 1 |
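The conversion script wraps `torch.onnx.export` once per sub-model. A minimal, self-contained export of a toy module showing the same knobs (requires PyTorch; the output path and names are illustrative):

```python
import torch

model = torch.nn.Linear(4, 2)
dummy = torch.randn(1, 4)

torch.onnx.export(
    model,
    (dummy,),
    "linear.onnx",
    input_names=["sample"],
    output_names=["out"],
    dynamic_axes={"sample": {0: "batch"}},  # keep the batch axis flexible
    do_constant_folding=True,
    opset_version=14,
)
print("exported linear.onnx")
```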
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ = '''▁'''
a__ = {'''vocab_file''': '''spiece.model'''}
a__ = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
a__ = {
'''google/pegasus-xsum''': 5_1_2,
}
a__ = logging.get_logger(__name__)
class PegasusTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : str , UpperCamelCase__ : int , UpperCamelCase__ : str="<pad>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Dict="<unk>" , UpperCamelCase__ : int="<mask_2>" , UpperCamelCase__ : Union[str, Any]="<mask_1>" , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=1_0_3 , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
snake_case__ = offset
if additional_special_tokens is not None:
if not isinstance(snake_case__ , snake_case__):
raise TypeError(
F'''additional_special_tokens should be of type {type(snake_case__)}, but is'''
F''' {type(snake_case__)}''')
snake_case__ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(snake_case__) , self.offset - 1)
]
if len(set(snake_case__)) != len(snake_case__):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''')
snake_case__ = additional_special_tokens_extended
else:
snake_case__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset)]
snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case__ , unk_token=snake_case__ , mask_token=snake_case__ , pad_token=snake_case__ , mask_token_sent=snake_case__ , offset=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
snake_case__ = mask_token_sent
snake_case__ = vocab_file
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(snake_case__)
# add special tokens to encoder dict
snake_case__ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
})
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1)})
snake_case__ = {v: k for k, v in self.encoder.items()}
@property
def __magic_name__ ( self : Union[str, Any]):
'''simple docstring'''
return len(self.sp_model) + self.offset
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = {self.convert_ids_to_tokens(snake_case__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Optional[int]):
'''simple docstring'''
snake_case__ = self.__dict__.copy()
snake_case__ = None
return state
def __setstate__( self : List[Any] , UpperCamelCase__ : Dict):
'''simple docstring'''
snake_case__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
snake_case__ = {}
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __magic_name__ ( self : Any , UpperCamelCase__ : str):
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__)
def __magic_name__ ( self : List[Any] , UpperCamelCase__ : str):
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
snake_case__ = self.sp_model.piece_to_id(snake_case__)
return sp_id + self.offset
def __magic_name__ ( self : Dict , UpperCamelCase__ : int):
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
snake_case__ = self.sp_model.IdToPiece(index - self.offset)
return token
def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Optional[int]):
'''simple docstring'''
snake_case__ = []
snake_case__ = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case__) + token
snake_case__ = []
else:
current_sub_tokens.append(snake_case__)
out_string += self.sp_model.decode(snake_case__)
return out_string.strip()
def __magic_name__ ( self : int , UpperCamelCase__ : str=False):
'''simple docstring'''
return 1
def __magic_name__ ( self : Optional[Any] , UpperCamelCase__ : Optional[int]):
'''simple docstring'''
snake_case__ = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __magic_name__ ( self : str , UpperCamelCase__ : List , UpperCamelCase__ : Optional[List] = None , UpperCamelCase__ : bool = False):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(snake_case__)
elif token_ids_a is None:
return self._special_token_mask(snake_case__) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a) + [1]
def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : str=None):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
'''simple docstring'''
if not os.path.isdir(snake_case__):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
snake_case__ = os.path.join(
snake_case__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(snake_case__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , snake_case__)
elif not os.path.isfile(self.vocab_file):
with open(snake_case__ , """wb""") as fi:
snake_case__ = self.sp_model.serialized_model_proto()
fi.write(snake_case__)
return (out_vocab_file,)
| 706 |
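The tokenizer above reserves the lowest ids for pad/eos/mask/unk specials and shifts raw SentencePiece ids upward by `offset`. A toy sketch of that scheme with the offset shrunk to 5 and dicts standing in for the SentencePiece model (all values illustrative):

```python
OFFSET = 5
ENCODER = {0: "<pad>", 1: "</s>", 2: "<mask_2>", 3: "<mask_1>", 4: "<unk_2>"}
SPM = {0: "<unk>", 1: "the", 2: "quick"}  # stand-in for sp_model.IdToPiece
DECODER = {v: k for k, v in ENCODER.items()}
SPM_IDS = {v: k for k, v in SPM.items()}


def id_to_token(index: int) -> str:
    return ENCODER[index] if index in ENCODER else SPM[index - OFFSET]


def token_to_id(token: str) -> int:
    if token in DECODER:
        return DECODER[token]
    return SPM_IDS.get(token, 0) + OFFSET  # unknown pieces map to <unk> + offset


print(id_to_token(1), id_to_token(6))            # </s> the
print(token_to_id("quick"), token_to_id("zzz"))  # 7 5
```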
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
a__ = logging.getLogger()
def _UpperCAmelCase ( ):
snake_case__ = argparse.ArgumentParser()
parser.add_argument("""-f""" )
snake_case__ = parser.parse_args()
return args.f
class DeeBertTests( TestCasePlus ):
"""simple docstring"""
def __magic_name__ ( self : int):
'''simple docstring'''
snake_case__ = logging.StreamHandler(sys.stdout)
logger.addHandler(UpperCamelCase__)
    def run_and_check( self : Union[str, Any] , UpperCamelCase__ : List[str]):
'''simple docstring'''
snake_case__ = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""")
with patch.object(UpperCamelCase__ , """argv""" , UpperCamelCase__):
snake_case__ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(UpperCamelCase__ , 0.6_66)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train( self : str):
'''simple docstring'''
snake_case__ = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(UpperCamelCase__)
snake_case__ = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(UpperCamelCase__)
snake_case__ = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(UpperCamelCase__)
| 99 | 0 |
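The tests above drive the example script's `main()` by temporarily patching `sys.argv`. A self-contained illustration of that trick, with `fake_main` standing in for `run_glue_deebert.main`:

```python
import sys
from unittest.mock import patch


def fake_main():
    return {"prog": sys.argv[0], "args": sys.argv[1:]}


testargs = ["run_glue_deebert.py", "--task_name", "MRPC", "--do_eval"]
with patch.object(sys, "argv", testargs):
    result = fake_main()

print(result)  # {'prog': 'run_glue_deebert.py', 'args': ['--task_name', 'MRPC', '--do_eval']}
```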