import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++-level logging

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`, plus its indices and all lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as links for the doc."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check the model list in `task_guide` matches the library, and update it if `overwrite` is set."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Calculate the apparent power in a single-phase AC circuit (angles in degrees)."""
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
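# A hedged usage sketch (editor's addition, not part of the original file):
# 230 V at 0 degrees with 10 A at -30 degrees. The function multiplies the
# rectangular forms directly, so the current's phase shows up in the sign of
# the imaginary (reactive) component.
if __name__ == "__main__":
    print(apparent_power(230, 10, 0, -30))  # roughly (1991.9-1150j)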
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_UpperCamelCase : Optional[Any] = Lock()
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[Any] ):
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(__snake_case )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowercase = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowercase = min(__snake_case , __snake_case )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(__snake_case )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowercase = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowercase = max(__snake_case , __snake_case )
# after all swaps are performed, send the values back to main
result_pipe[1].send(__snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] ):
'''simple docstring'''
lowercase = []
lowercase = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowercase = Pipe()
lowercase = Pipe()
process_array_.append(
Process(
target=__snake_case , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowercase = temp_rs
lowercase = temp_rr
for i in range(1 , len(__snake_case ) - 1 ):
lowercase = Pipe()
lowercase = Pipe()
process_array_.append(
Process(
target=__snake_case , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowercase = temp_rs
lowercase = temp_rr
process_array_.append(
Process(
target=__snake_case , args=(
len(__snake_case ) - 1,
arr[len(__snake_case ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(__snake_case ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(__snake_case ) ):
lowercase = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowercase = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*__snake_case )
lowercase = odd_even_transposition(__snake_case )
print('Sorted List\n' )
print(*__snake_case )
if __name__ == "__main__":
main()
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2 ** power (Project Euler problem 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
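# A quick sanity check of the digit-sum loop above (editor's sketch, not part
# of the original script): 2**15 is 32768 and 3 + 2 + 7 + 6 + 8 = 26, so
# solution(15) must return 26. Project Euler 16 asks for solution(1000).
if __name__ == "__main__":
    assert solution(15) == 26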
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import os
import tempfile
import unittest

from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        NezhaForMaskedLM,
        NezhaForMultipleChoice,
        NezhaForNextSentencePrediction,
        NezhaForPreTraining,
        NezhaForQuestionAnswering,
        NezhaForSequenceClassification,
        NezhaForTokenClassification,
        NezhaModel,
    )
    from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST


class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking (bell) biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
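# A minimal usage sketch (editor's addition): build a 1 kHz low-pass filter at
# a 48 kHz sample rate and push a few samples through it. This assumes the
# companion `IIRFilter.process(sample)` method from audio_filters.iir_filter,
# which filters one sample at a time.
if __name__ == "__main__":
    lowpass_filter = make_lowpass(1000, 48000)
    print([round(lowpass_filter.process(sample), 6) for sample in (0.0, 1.0, 0.5, -0.5)])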
"""simple docstring"""
from __future__ import annotations
def snake_case ( UpperCamelCase__ : tuple[int, int] , UpperCamelCase__ : int ) -> list[tuple[int, int]]:
lowerCamelCase , lowerCamelCase : Optional[int] = position
lowerCamelCase : Any = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowerCamelCase : Optional[Any] = []
for position in positions:
lowerCamelCase , lowerCamelCase : Dict = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(UpperCamelCase__ )
return permissible_positions
def snake_case ( UpperCamelCase__ : list[list[int]] ) -> bool:
return not any(elem == 0 for row in board for elem in row )
def snake_case ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : tuple[int, int] , UpperCamelCase__ : int ) -> bool:
if is_complete(UpperCamelCase__ ):
return True
for position in get_valid_pos(UpperCamelCase__ , len(UpperCamelCase__ ) ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = position
if board[y][x] == 0:
lowerCamelCase : List[Any] = curr + 1
if open_knight_tour_helper(UpperCamelCase__ , UpperCamelCase__ , curr + 1 ):
return True
lowerCamelCase : int = 0
return False
def snake_case ( UpperCamelCase__ : int ) -> list[list[int]]:
lowerCamelCase : List[str] = [[0 for i in range(UpperCamelCase__ )] for j in range(UpperCamelCase__ )]
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
lowerCamelCase : Any = 1
if open_knight_tour_helper(UpperCamelCase__ , (i, j) , 1 ):
return board
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : List[Any] = F'Open Kight Tour cannot be performed on a board of size {n}'
raise ValueError(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
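# A minimal usage sketch (editor's addition): an open knight's tour exists on
# a 5x5 board, so this should print a matrix numbered 1 through 25.
if __name__ == "__main__":
    for row in open_knight_tour(5):
        print(row)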
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
    def test_forward_signature(self):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a )
SCREAMING_SNAKE_CASE__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Any = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE__ : Tuple = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(a )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : List[str] = True
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
SCREAMING_SNAKE_CASE__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    def test_hidden_states_output(self):
def check_hidden_states_output(a : Optional[int] , a : Optional[Any] , a : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE__ : List[str] = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
SCREAMING_SNAKE_CASE__ : str = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : List[Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A_ ( self : str ) ->int:
pass
def prepare_video():
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
SCREAMING_SNAKE_CASE__ : List[Any] = np.load(_lowerCamelCase )
return list(_lowerCamelCase )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
    def test_inference_for_video_classification(self):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
a )
SCREAMING_SNAKE_CASE__ : List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : str = prepare_video()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(a , return_tensors="pt" ).to(a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(**a )
# verify the logits
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , a )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([0.3669, -0.0688, -0.2421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@slow
    def test_inference_for_pretraining(self):
SCREAMING_SNAKE_CASE__ : List[Any] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_video()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(a , return_tensors="pt" ).to(a )
# add boolean mask, indicating which patches to mask
SCREAMING_SNAKE_CASE__ : Any = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
SCREAMING_SNAKE_CASE__ : str = torch.load(a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**a )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[str] = torch.Size([1, 14_08, 15_36] )
SCREAMING_SNAKE_CASE__ : str = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([0.5142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=a ).to(
a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a )
        SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([0.6469], device=a)
self.assertTrue(torch.allclose(outputs.loss , a , atol=1E-4 ) )
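# Standalone sketch of the masking arithmetic the tests above rely on, with
# assumed checkpoint-like sizes (224px frames, 16px patches, 16 frames,
# tubelet size 2); none of these numbers come from this file's tiny config.
if __name__ == "__main__":
    import torch

    seq_length = (16 // 2) * (224 // 16) ** 2  # frames/tubelet * patches per frame = 1568
    num_masks = int(0.9 * seq_length)  # 1411 masked tokens at mask_ratio 0.9
    mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
    bool_masked_pos = mask.expand(4, -1).bool()  # one shared mask for a batch of 4
    print(bool_masked_pos.shape)  # torch.Size([4, 1568])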
| 26
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize the prompts and yield each one n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Stop generation once every sequence contains one of the EOF strings."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation.")
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs)
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"""check({human_eval['test'][task]['entry_point']})"""
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers)
        print(f"""Results: {pass_at_k}""")
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
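# Worked example of the truncation helper above: generation runs until an EOF
# marker appears after the prompt, and remove_last_block then drops the marker
# and everything that follows it.
#
#   text = "def add(a, b):\n    return a + b\nprint(add(1, 2))"
#   remove_last_block(text)  # -> "def add(a, b):\n    return a + b"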
| 26
| 1
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
):
    """simple docstring"""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
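# Worked example: 1-cells are walkable, 0-cells are walls, every step costs 1.
if __name__ == "__main__":
    grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
    dist, path = dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
    print(dist)  # 6.0
    print(path)  # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]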
| 516
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
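# For orientation: stripped of the shared transformers helpers, this conftest
# boils down to standard pytest hooks, roughly (illustrative only):
#
#   def pytest_addoption(parser):
#       parser.addoption("--make-reports", action="store", default=None)
#
#   def pytest_terminal_summary(terminalreporter):
#       if terminalreporter.config.getoption("--make-reports"):
#           ...  # write the custom report files here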
| 516
| 1
|
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
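# Phase-by-phase trace on a tiny input, to show why the inner range alternates
# its starting index between 0 (even phase) and 1 (odd phase):
#   [3, 2, 1] -> even phase compares (0, 1): [2, 3, 1]
#             -> odd phase compares (1, 2):  [2, 1, 3]
#             -> even phase compares (0, 1): [1, 2, 3]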
| 670
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
    def test_save_load_pretrained_additional_features(self):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
    def test_feature_extractor(self):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self):
UpperCamelCase_: List[Any] = self.get_feature_extractor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Dict = """This is a test string"""
UpperCamelCase_: Tuple = processor(text=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_tokenizer_decode(self):
UpperCamelCase_: List[str] = self.get_feature_extractor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Tuple = processor.batch_decode(snake_case_ )
UpperCamelCase_: str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
    def test_model_input_names(self):
UpperCamelCase_: Any = self.get_feature_extractor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
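# Typical end-to-end use of the processor exercised above (checkpoint name
# taken from these tests; downloads weights/configs at runtime):
#
#   from transformers import ClapProcessor
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   text_inputs = processor(text=["a sound of a dog"], return_tensors="pt")
#   audio_inputs = processor(audios=raw_audio, return_tensors="np")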
| 670
| 1
|
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
'''simple docstring'''
def run_func(snake_case_ : Any ):
@wraps(snake_case_ )
def run_in_eager_mode(*snake_case_ : Tuple , **snake_case_ : Any ):
return func(*snake_case_ , **snake_case_ )
@wraps(snake_case_ )
@tf.function(experimental_compile=snake_case_ )
def run_in_graph_mode(*snake_case_ : Any , **snake_case_ : str ):
return func(*snake_case_ , **snake_case_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    '''simple docstring'''
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class __A ( UpperCamelCase__ ):
a__ : TensorFlowBenchmarkArguments
a__ : PretrainedConfig
a__ : str = "TensorFlow"
@property
def _lowercase (self : List[str] ):
return tf.__version__
def _lowercase (self : Optional[Any] , __a : str , __a : int , __a : int ):
# initialize GPU on separate process
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase_ = self._prepare_inference_func(__a , __a , __a )
return self._measure_speed(_inference )
def _lowercase (self : Optional[Any] , __a : str , __a : int , __a : int ):
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase_ = self._prepare_train_func(__a , __a , __a )
return self._measure_speed(_train )
def _lowercase (self : Optional[int] , __a : str , __a : int , __a : int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __a )
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase_ = self._prepare_inference_func(__a , __a , __a )
return self._measure_memory(_inference )
def _lowercase (self : Dict , __a : str , __a : int , __a : int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __a )
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase_ = self._prepare_train_func(__a , __a , __a )
return self._measure_memory(_train )
def _lowercase (self : Optional[Any] , __a : str , __a : int , __a : int ):
UpperCAmelCase_ = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase_ = (
hasattr(__a , "architectures" )
and isinstance(config.architectures , __a )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase_ = getattr(__a , __a )
UpperCAmelCase_ = model_cls(__a )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase_ = TF_MODEL_MAPPING[config.__class__](__a )
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ = config.vocab_size if hasattr(__a , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase_ = random_input_ids(__a , __a , __a )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__a , decoder_input_ids=__a , training=__a )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__a , training=__a )
UpperCAmelCase_ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _lowercase (self : Dict , __a : str , __a : int , __a : int ):
UpperCAmelCase_ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase_ = (
hasattr(__a , "architectures" )
and isinstance(config.architectures , __a )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase_ = getattr(__a , __a )
UpperCAmelCase_ = model_cls(__a )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase_ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__a )
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ = config.vocab_size if hasattr(__a , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase_ = random_input_ids(__a , __a , __a )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase_ = model(__a , decoder_input_ids=__a , labels=__a , training=__a )[0]
UpperCAmelCase_ = tf.gradients(__a , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase_ = model(__a , labels=__a , training=__a )[0]
UpperCAmelCase_ = tf.gradients(__a , model.trainable_variables )
return gradients
UpperCAmelCase_ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _lowercase (self : Dict , __a : str ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run the model 5 additional times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__a , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase_ = timeit.repeat(
__a , repeat=self.args.repeat , number=10 , )
return min(__a ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def _lowercase (self : Dict , __a : Callable[[], None] ):
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase_ = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase_ = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase_ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase_ = nvml.nvmlDeviceGetMemoryInfo(__a )
UpperCAmelCase_ = meminfo.used
UpperCAmelCase_ = Memory(__a )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase_ = None
else:
UpperCAmelCase_ = measure_peak_memory_cpu(__a )
UpperCAmelCase_ = Memory(__a ) if isinstance(__a , __a ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase_ = stop_memory_tracing(__a )
if memory is None:
UpperCAmelCase_ = summary.total
else:
UpperCAmelCase_ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
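# Usage sketch for the decorator factory at the top of this file: with eager
# mode off, the wrapped callable is compiled as a tf.function (XLA-compiled
# when use_xla=True). Names below are illustrative.
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)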
| 78
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a__: List[Any] = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path)
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir)

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
A__ = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
A__ = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
A__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
A__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
A__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it,and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
A__ = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
A__ = AutoModelWithLMHead.from_config(UpperCamelCase__ )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
A__ = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
A__ = min(data_args.block_size , tokenizer.max_len )
# Get datasets
A__ = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
A__ = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , evaluate=UpperCamelCase__ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
A__ = DataCollatorForPermutationLanguageModeling(
tokenizer=UpperCamelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
A__ = DataCollatorForWholeWordMask(
tokenizer=UpperCamelCase__ , mlm_probability=data_args.mlm_probability )
else:
A__ = DataCollatorForLanguageModeling(
tokenizer=UpperCamelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
A__ = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , data_collator=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , prediction_loss_only=UpperCamelCase__ , )
# Training
if training_args.do_train:
A__ = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=UpperCamelCase__ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A__ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A__ = trainer.evaluate()
A__ = math.exp(eval_output['''eval_loss'''] )
A__ = {'''perplexity''': perplexity}
A__ = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , UpperCamelCase__ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(UpperCamelCase__ )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
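# Example invocation (illustrative paths and arguments, not from the source):
#
#   python run_language_modeling.py \
#       --model_name_or_path bert-base-uncased \
#       --mlm \
#       --train_data_file train.txt \
#       --do_train \
#       --output_dir ./lm_output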
| 190
| 0
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    # ds_b is the digit sum of the digits of a_i at or above position k
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            jumps = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F"{solution() = }")
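# Naive reference for the sequence the memoized solver above accelerates:
# a(1) = 1 and a(n + 1) = a(n) + digitsum(a(n)), giving 1, 2, 4, 8, 16, 23, 28, ...
def naive_a(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a


if __name__ == "__main__":
    assert [naive_a(k) for k in range(1, 7)] == [1, 2, 4, 8, 16, 23]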
| 354
|
def different_signs(num1: int, num2: int) -> bool:
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
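# Why XOR works: in two's complement, num1 ^ num2 is negative exactly when
# the sign bits of the two operands differ.
if __name__ == "__main__":
    assert different_signs(1, -1) is True
    assert different_signs(1, 1) is False
    assert different_signs(-2, -10) is False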
| 354
| 1
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
    def test_betas(self):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
    def test_schedules(self):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case__ )
    def test_variance_type(self):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case__ )
    def test_clip_sample(self):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case__ )
    def test_thresholding(self):
"""simple docstring"""
self.check_over_configs(thresholding=snake_case__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , )
    def test_prediction_type(self):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
    def test_time_indices(self):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case__ )
    def test_variance(self):
"""simple docstring"""
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
    def test_batch_step_no_noise(self):
"""simple docstring"""
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case__ )
__lowerCAmelCase = len(snake_case__ )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
__lowerCAmelCase = self.dummy_sample_deter + 0.1
__lowerCAmelCase = self.dummy_sample_deter - 0.1
__lowerCAmelCase = samplea.shape[0]
__lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
__lowerCAmelCase = torch.arange(snake_case__ )[0:3, None].repeat(1 , snake_case__ )
__lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__lowerCAmelCase = scheduler.batch_step_no_noise(snake_case__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
__lowerCAmelCase = torch.sum(torch.abs(snake_case__ ) )
__lowerCAmelCase = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1E-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1E-3
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case__ )
__lowerCAmelCase = len(snake_case__ )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
__lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(snake_case__ ) ):
# 1. predict noise residual
__lowerCAmelCase = model(snake_case__ , snake_case__ )
# 2. predict previous mean of sample x_t-1
__lowerCAmelCase = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(snake_case__ ) )
__lowerCAmelCase = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(prediction_type="v_prediction" )
__lowerCAmelCase = scheduler_class(**snake_case__ )
__lowerCAmelCase = len(snake_case__ )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
__lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(snake_case__ ) ):
# 1. predict noise residual
__lowerCAmelCase = model(snake_case__ , snake_case__ )
# 2. predict previous mean of sample x_t-1
__lowerCAmelCase = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
__lowerCAmelCase = pred_prev_sample
__lowerCAmelCase = torch.sum(torch.abs(snake_case__ ) )
__lowerCAmelCase = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case__ )
__lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case__ )
__lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(snake_case__ ):
if i == len(snake_case__ ) - 1:
__lowerCAmelCase = -1
else:
__lowerCAmelCase = timesteps[i + 1]
__lowerCAmelCase = scheduler.previous_timestep(snake_case__ )
__lowerCAmelCase = prev_t.item()
self.assertEqual(snake_case__ , snake_case__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case__ )
__lowerCAmelCase = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=snake_case__ )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case__ )
__lowerCAmelCase = [100, 87, 50, 1, 0]
__lowerCAmelCase = len(snake_case__ )
        with self.assertRaises(ValueError , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**snake_case__ )
__lowerCAmelCase = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=snake_case__ )
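# A minimal denoising-loop sketch for the scheduler exercised above (a sketch,
# assuming the standard diffusers DDPMParallelScheduler API; `model` is a
# hypothetical noise-prediction network, not defined here):
#
#   scheduler = DDPMParallelScheduler(num_train_timesteps=1_000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       noise_pred = model(sample, t)                      # hypothetical model
#       sample = scheduler.step(noise_pred, t, sample).prev_sample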
| 611
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger("transformers.models.speecht5")
def _UpperCAmelCase ( UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[int] ):
"""simple docstring"""
hf_model.apply_weight_norm()
__lowerCAmelCase = checkpoint["input_conv.weight_g"]
__lowerCAmelCase = checkpoint["input_conv.weight_v"]
__lowerCAmelCase = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
__lowerCAmelCase = checkpoint[F"upsamples.{i}.1.weight_g"]
__lowerCAmelCase = checkpoint[F"upsamples.{i}.1.weight_v"]
__lowerCAmelCase = checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
__lowerCAmelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
__lowerCAmelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
__lowerCAmelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
__lowerCAmelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
__lowerCAmelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
__lowerCAmelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
__lowerCAmelCase = checkpoint["output_conv.1.weight_g"]
__lowerCAmelCase = checkpoint["output_conv.1.weight_v"]
__lowerCAmelCase = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def _UpperCAmelCase ( UpperCamelCase: Tuple , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: str=None , UpperCamelCase: Tuple=None , ):
"""simple docstring"""
if config_path is not None:
__lowerCAmelCase = SpeechTaHifiGanConfig.from_pretrained(UpperCamelCase )
else:
__lowerCAmelCase = SpeechTaHifiGanConfig()
__lowerCAmelCase = SpeechTaHifiGan(UpperCamelCase )
__lowerCAmelCase = torch.load(UpperCamelCase )
load_weights(orig_checkpoint["model"]["generator"] , UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = np.load(UpperCamelCase )
__lowerCAmelCase = stats[0].reshape(-1 )
__lowerCAmelCase = stats[1].reshape(-1 )
__lowerCAmelCase = torch.from_numpy(UpperCamelCase ).float()
__lowerCAmelCase = torch.from_numpy(UpperCamelCase ).float()
model.save_pretrained(UpperCamelCase )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCamelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
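# Example invocation (hypothetical script name and paths, for illustration only):
#   python convert_hifigan.py \
#       --checkpoint_path ./generator.ckpt \
#       --stats_path ./stats.npy \
#       --config_path ./config.json \
#       --pytorch_dump_folder_path ./speecht5_hifigan \
#       --push_to_hub username/speecht5_hifigan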
| 611
| 1
|
'''simple docstring'''
from math import ceil, sqrt
def __a(SCREAMING_SNAKE_CASE_ : int = 1000000 ):
'''simple docstring'''
_lowerCAmelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
_lowerCAmelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
_lowerCAmelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f'''{solution() = }''')
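# Why the count works (a background note): a square lamina with outer width n
# and hole width m (same parity, 0 < m <= n - 2) uses n**2 - m**2 tiles, so for
# each n the loop counts the m of matching parity with n**2 - m**2 <= limit,
# i.e. m >= ceil(sqrt(n**2 - limit)); that range holds (n - m_low - 2) // 2 + 1 values.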
| 705
|
'''simple docstring'''
_SCREAMING_SNAKE_CASE = range(2, 20 + 1)
_SCREAMING_SNAKE_CASE = [10**k for k in range(ks[-1] + 1)]
_SCREAMING_SNAKE_CASE = {}
def __a(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
_lowerCAmelCase = sum(a_i[j] for j in range(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) )
_lowerCAmelCase = sum(a_i[j] * base[j] for j in range(min(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ) )
_lowerCAmelCase , _lowerCAmelCase = 0, 0
_lowerCAmelCase = n - i
_lowerCAmelCase = memo.get(SCREAMING_SNAKE_CASE_ )
if sub_memo is not None:
_lowerCAmelCase = sub_memo.get(SCREAMING_SNAKE_CASE_ )
if jumps is not None and len(SCREAMING_SNAKE_CASE_ ) > 0:
# find and make the largest jump without going over
_lowerCAmelCase = -1
for _k in range(len(SCREAMING_SNAKE_CASE_ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCAmelCase = _k
break
if max_jump >= 0:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCAmelCase = diff + c
for j in range(min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) ):
_lowerCAmelCase , _lowerCAmelCase = divmod(SCREAMING_SNAKE_CASE_ , 10 )
if new_c > 0:
add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
_lowerCAmelCase = []
else:
_lowerCAmelCase = {c: []}
_lowerCAmelCase = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCAmelCase , _lowerCAmelCase = next_term(SCREAMING_SNAKE_CASE_ , k - 1 , i + dn , SCREAMING_SNAKE_CASE_ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCAmelCase , _lowerCAmelCase = compute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , i + dn , SCREAMING_SNAKE_CASE_ )
diff += _diff
dn += terms_jumped
_lowerCAmelCase = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCAmelCase = 0
while j < len(SCREAMING_SNAKE_CASE_ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(SCREAMING_SNAKE_CASE_ , (diff, dn, k) )
return (diff, dn)
def __a(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(SCREAMING_SNAKE_CASE_ ):
a_i.extend([0 for _ in range(k - len(SCREAMING_SNAKE_CASE_ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCAmelCase = i
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0, 0, 0
for j in range(len(SCREAMING_SNAKE_CASE_ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCAmelCase = ds_c + ds_b
diff += addend
_lowerCAmelCase = 0
for j in range(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = a_i[j] + addend
_lowerCAmelCase , _lowerCAmelCase = divmod(SCREAMING_SNAKE_CASE_ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return diff, i - start_i
def __a(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ):
'''simple docstring'''
for j in range(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ):
_lowerCAmelCase = digits[j] + addend
if s >= 10:
_lowerCAmelCase , _lowerCAmelCase = divmod(SCREAMING_SNAKE_CASE_ , 10 )
_lowerCAmelCase = addend // 10 + quotient
else:
_lowerCAmelCase = s
_lowerCAmelCase = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCAmelCase , _lowerCAmelCase = divmod(SCREAMING_SNAKE_CASE_ , 10 )
digits.append(SCREAMING_SNAKE_CASE_ )
def __a(SCREAMING_SNAKE_CASE_ : int = 10**15 ):
'''simple docstring'''
_lowerCAmelCase = [1]
_lowerCAmelCase = 1
_lowerCAmelCase = 0
while True:
_lowerCAmelCase , _lowerCAmelCase = next_term(SCREAMING_SNAKE_CASE_ , 20 , i + dn , SCREAMING_SNAKE_CASE_ )
dn += terms_jumped
if dn == n - i:
break
_lowerCAmelCase = 0
for j in range(len(SCREAMING_SNAKE_CASE_ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
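# For intuition, a brute-force reference for the same recurrence (a sketch with
# a hypothetical name; each term adds the digit sum of the previous term). It is
# fine for small n but hopeless for the n = 10**15 the memoized solver targets:
def solution_naive(n: int = 20) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a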
| 489
| 0
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowerCamelCase : str = ""
__lowerCamelCase : List[Any] = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
__lowerCamelCase : Tuple = None # compression type in fsspec. ex: "gzip"
    __lowerCamelCase : str = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self, lowerCamelCase__ = "", lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__ ):
super().__init__(self, **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
A : int = fsspec.open(
__snake_case, mode="""rb""", protocol=__snake_case, compression=self.compression, client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""", {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
A : List[Any] = os.path.basename(self.file.path.split("""::""" )[0] )
A : List[Any] = (
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
A : Tuple = None
@classmethod
def _lowerCAmelCase ( cls, lowerCamelCase__ ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip("""/""" )
def _lowerCAmelCase ( self ):
if self.dir_cache is None:
A : Any = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
A : Optional[int] = {f["""name"""]: f}
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return self.file.open().read()
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = "rb", lowerCamelCase__=None, lowerCamelCase__=True, lowerCamelCase__=None, **lowerCamelCase__, ):
A : Optional[Any] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowerCamelCase : List[Any] = "bz2"
__lowerCamelCase : List[Any] = "bz2"
__lowerCamelCase : Dict = ".bz2"
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowerCamelCase : int = "gzip"
__lowerCamelCase : Optional[int] = "gzip"
__lowerCamelCase : Dict = ".gz"
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowerCamelCase : List[Any] = "lz4"
__lowerCamelCase : Union[str, Any] = "lz4"
__lowerCamelCase : int = ".lz4"
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "xz"
__lowerCamelCase : Any = "xz"
__lowerCamelCase : List[Any] = ".xz"
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = "zstd"
__lowerCamelCase : Optional[Any] = "zstd"
__lowerCamelCase : int = ".zst"
def __init__( self, lowerCamelCase__, lowerCamelCase__ = "rb", lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = DEFAULT_BLOCK_SIZE, **lowerCamelCase__, ):
super().__init__(
fo=__snake_case, mode=__snake_case, target_protocol=__snake_case, target_options=__snake_case, block_size=__snake_case, **__snake_case, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
A : Union[str, Any] = self.file.__enter__
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__ ):
A : Optional[Any] = file_
def __enter__( self ):
self._file.__enter__()
return self
def __exit__( self, *lowerCamelCase__, **lowerCamelCase__ ):
self._file.__exit__(*__snake_case, **__snake_case )
def __iter__( self ):
return iter(self._file )
def _lowerCAmelCase ( self ):
return next(self._file )
def __getattr__( self, lowerCamelCase__ ):
return getattr(self._file, __snake_case )
def fixed_enter(*lowerCamelCase__, **lowerCamelCase__ ):
return WrappedFile(_enter(*__snake_case, **__snake_case ) )
A : Tuple = fixed_enter
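# Hypothetical usage sketch (assumes these filesystems are registered with
# fsspec under their protocol names, as `datasets` does on import); the chained
# URL follows the gzip://file::url convention noted in the class attributes above:
#   import fsspec
#   with fsspec.open("gzip://data.txt::file://./data.txt.gz", mode="rb") as f:
#       raw = f.read()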
| 662
|
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def a ( ) -> Tuple:
__magic_name__: Optional[int] = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
__magic_name__: Any = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(__UpperCAmelCase )
# Let's go
__magic_name__: int = parser.parse_args()
if not hasattr(__UpperCAmelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
__magic_name__: Optional[Any] = args.func(__UpperCAmelCase )
service.run()
if __name__ == "__main__":
main()
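# Example invocation (assumes the package installs a `diffusers-cli` console
# script pointing at this `main`):
#   diffusers-cli env    # prints environment info via EnvironmentCommand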
| 96
| 0
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
def _a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _a ( self ):
lowerCamelCase__ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowerCamelCase__ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowerCamelCase__ ="xvjiarui/stable-diffusion-2-inpainting"
lowerCamelCase__ , lowerCamelCase__ =FlaxStableDiffusionInpaintPipeline.from_pretrained(_lowerCamelCase , safety_checker=_lowerCamelCase )
lowerCamelCase__ ="Face of a yellow cat, high resolution, sitting on a park bench"
lowerCamelCase__ =jax.random.PRNGKey(0 )
lowerCamelCase__ =50
lowerCamelCase__ =jax.device_count()
lowerCamelCase__ =num_samples * [prompt]
lowerCamelCase__ =num_samples * [init_image]
lowerCamelCase__ =num_samples * [mask_image]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ =pipeline.prepare_inputs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# shard inputs and rng
lowerCamelCase__ =replicate(_lowerCamelCase )
lowerCamelCase__ =jax.random.split(_lowerCamelCase , jax.device_count() )
lowerCamelCase__ =shard(_lowerCamelCase )
lowerCamelCase__ =shard(_lowerCamelCase )
lowerCamelCase__ =shard(_lowerCamelCase )
lowerCamelCase__ =pipeline(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , jit=_lowerCamelCase )
lowerCamelCase__ =output.images.reshape(_lowerCamelCase , 512 , 512 , 3 )
lowerCamelCase__ =images[0, 253:256, 253:256, -1]
lowerCamelCase__ =jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ =jnp.array(
[0.3_6_1_1_3_0_7, 0.3_7_6_4_9_7_3_6, 0.3_7_5_7_4_0_8, 0.3_8_2_1_3_9_5_3, 0.3_9_2_9_5_1_6_7, 0.3_8_4_1_6_3_1, 0.4_1_5_5_4_9_7_8, 0.4_1_3_7_4_7_5, 0.4_2_1_7_0_8_4] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 132
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a ={
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
a =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
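# Background note on the lazy pattern: importing this module stays cheap because
# `_LazyModule` defers the heavy backend imports until first attribute access,
# e.g. (illustrative only):
#   from transformers.models.albert import AlbertConfig  # no torch import yet
#   from transformers.models.albert import AlbertModel   # triggers the torch branch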
| 132
| 1
|
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
__lowerCAmelCase : str = True
from torch.cuda.amp import autocast
__lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__)
def lowerCAmelCase ( UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Tuple=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=_lowerCAmelCase )
@dataclass
class A :
a_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
a_ = field(
default=lowerCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
a_ = field(
default=lowerCamelCase__ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
a_ = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} )
a_ = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} )
a_ = field(
default=0.1 , metadata={
            '''help''': '''The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'''
} , )
a_ = field(
        default=0.1 , metadata={'''help''': '''The dropout probability for all 1D convolutional layers in feature extractor.'''} , )
a_ = field(
default=0.05 , metadata={
'''help''': (
            '''Probability of each feature vector along the time axis to be chosen as the start of the vector '''
            '''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature '''
            '''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'''
)
} , )
a_ = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} )
@dataclass
class A :
a_ = field(
default=lowerCamelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
a_ = field(
default='''train+validation''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
a_ = field(
default=lowerCamelCase__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
a_ = field(
default=lowerCamelCase__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
a_ = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
a_ = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of validation examples to this '''
'''value if set.'''
)
} , )
a_ = list_field(
default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class A :
a_ = 42
a_ = True
a_ = None
a_ = None
a_ = None
a_ = None
def __call__( self : Optional[Any] , __a : Optional[Any] ) -> Dict[str, torch.Tensor]:
__UpperCAmelCase = [{"input_values": feature["input_values"]} for feature in features]
__UpperCAmelCase = [{"input_ids": feature["labels"]} for feature in features]
__UpperCAmelCase = self.processor.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
__UpperCAmelCase = self.processor.pad(
labels=__UpperCamelCase , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
__UpperCAmelCase = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_0_0 )
__UpperCAmelCase = labels
return batch
class A ( lowerCamelCase__ ):
def snake_case__ ( self : Tuple , __a : Optional[int] , __a : Optional[Any] ) -> torch.Tensor:
model.train()
__UpperCAmelCase = self._prepare_inputs(__UpperCamelCase )
if self.use_amp:
with autocast():
__UpperCAmelCase = self.compute_loss(__UpperCamelCase , __UpperCamelCase )
else:
__UpperCAmelCase = self.compute_loss(__UpperCamelCase , __UpperCamelCase )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__UpperCAmelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__UpperCAmelCase = loss.sum() / (inputs["labels"] >= 0).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']""" )
if self.args.gradient_accumulation_steps > 1:
__UpperCAmelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__UpperCamelCase ).backward()
elif self.use_apex:
with amp.scale_loss(__UpperCamelCase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__UpperCamelCase )
else:
loss.backward()
return loss.detach()
def lowerCAmelCase ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f""" distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , _lowerCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__UpperCAmelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
__UpperCAmelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
__UpperCAmelCase = f"""[{''.join(data_args.chars_to_ignore )}]"""
def remove_special_characters(UpperCamelCase__ : Optional[int] ):
__UpperCAmelCase = re.sub(_lowerCAmelCase , '''''' , batch['''sentence'''] ).lower() + " "
return batch
__UpperCAmelCase = train_dataset.map(_lowerCAmelCase , remove_columns=['''sentence'''] )
__UpperCAmelCase = eval_dataset.map(_lowerCAmelCase , remove_columns=['''sentence'''] )
def extract_all_chars(UpperCamelCase__ : Optional[int] ):
__UpperCAmelCase = " ".join(batch['''text'''] )
__UpperCAmelCase = list(set(_lowerCAmelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
__UpperCAmelCase = train_dataset.map(
_lowerCAmelCase , batched=_lowerCAmelCase , batch_size=-1 , keep_in_memory=_lowerCAmelCase , remove_columns=train_dataset.column_names , )
__UpperCAmelCase = train_dataset.map(
_lowerCAmelCase , batched=_lowerCAmelCase , batch_size=-1 , keep_in_memory=_lowerCAmelCase , remove_columns=eval_dataset.column_names , )
__UpperCAmelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
__UpperCAmelCase = {v: k for k, v in enumerate(_lowerCAmelCase )}
__UpperCAmelCase = vocab_dict[" "]
del vocab_dict[" "]
__UpperCAmelCase = len(_lowerCAmelCase )
__UpperCAmelCase = len(_lowerCAmelCase )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
__UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0.0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase )
__UpperCAmelCase = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
__UpperCAmelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__UpperCAmelCase = min(len(_lowerCAmelCase ) , data_args.max_train_samples )
__UpperCAmelCase = train_dataset.select(range(_lowerCAmelCase ) )
if data_args.max_val_samples is not None:
__UpperCAmelCase = eval_dataset.select(range(data_args.max_val_samples ) )
__UpperCAmelCase = torchaudio.transforms.Resample(4_8_0_0_0 , 1_6_0_0_0 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(UpperCamelCase__ : List[str] ):
__UpperCAmelCase = torchaudio.load(batch['''path'''] )
__UpperCAmelCase = resampler(_lowerCAmelCase ).squeeze().numpy()
__UpperCAmelCase = 1_6_0_0_0
__UpperCAmelCase = batch["text"]
return batch
__UpperCAmelCase = train_dataset.map(
_lowerCAmelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__UpperCAmelCase = eval_dataset.map(
_lowerCAmelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(UpperCamelCase__ : Union[str, Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
__UpperCAmelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(_lowerCAmelCase )
return batch
__UpperCAmelCase = train_dataset.map(
_lowerCAmelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , )
__UpperCAmelCase = eval_dataset.map(
_lowerCAmelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , )
# Metric
__UpperCAmelCase = datasets.load_metric('''wer''' )
def compute_metrics(UpperCamelCase__ : List[Any] ):
__UpperCAmelCase = pred.predictions
__UpperCAmelCase = np.argmax(_lowerCAmelCase , axis=-1 )
__UpperCAmelCase = processor.tokenizer.pad_token_id
__UpperCAmelCase = processor.batch_decode(_lowerCAmelCase )
# we do not want to group tokens when computing the metrics
__UpperCAmelCase = processor.batch_decode(pred.label_ids , group_tokens=_lowerCAmelCase )
__UpperCAmelCase = wer_metric.compute(predictions=_lowerCAmelCase , references=_lowerCAmelCase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__UpperCAmelCase = DataCollatorCTCWithPadding(processor=_lowerCAmelCase , padding=_lowerCAmelCase )
# Initialize our Trainer
__UpperCAmelCase = CTCTrainer(
model=_lowerCAmelCase , data_collator=_lowerCAmelCase , args=_lowerCAmelCase , compute_metrics=_lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__UpperCAmelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__UpperCAmelCase = model_args.model_name_or_path
else:
__UpperCAmelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__UpperCAmelCase = trainer.train(resume_from_checkpoint=_lowerCAmelCase )
trainer.save_model()
__UpperCAmelCase = train_result.metrics
__UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCAmelCase )
)
__UpperCAmelCase = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.log_metrics('''train''' , _lowerCAmelCase )
trainer.save_metrics('''train''' , _lowerCAmelCase )
trainer.save_state()
# Evaluation
__UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCAmelCase = trainer.evaluate()
__UpperCAmelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(_lowerCAmelCase )
__UpperCAmelCase = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.log_metrics('''eval''' , _lowerCAmelCase )
trainer.save_metrics('''eval''' , _lowerCAmelCase )
return results
if __name__ == "__main__":
main()
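# Example invocation (hypothetical arguments, for illustration only):
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-xlsr \
#       --do_train --do_eval \
#       --freeze_feature_extractor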
| 262
|
from math import isclose, sqrt
def UpperCAmelCase_ (_lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float ):
__UpperCamelCase : Optional[Any] = point_y / 4 / point_x
__UpperCamelCase : Any = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__UpperCamelCase : List[Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
__UpperCamelCase : Optional[int] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
__UpperCamelCase : List[str] = outgoing_gradient**2 + 4
__UpperCamelCase : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__UpperCamelCase : Dict = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
__UpperCamelCase : Union[str, Any] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__UpperCamelCase : List[Any] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__UpperCamelCase : List[Any] = x_minus if isclose(_lowerCAmelCase , _lowerCAmelCase ) else x_plus
__UpperCamelCase : Tuple = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def UpperCAmelCase_ (_lowerCAmelCase : float = 1.4 , _lowerCAmelCase : float = -9.6 ):
__UpperCamelCase : int = 0
__UpperCamelCase : float = first_x_coord
__UpperCamelCase : float = first_y_coord
__UpperCamelCase : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[Any] = next_point(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
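# Geometry behind next_point (a background note): the ellipse is
# 4*x**2 + y**2 = 100, so implicit differentiation gives tangent slope
# -4*x/y and normal slope y / (4 * x) -- the `point_y / 4 / point_x` above.
# The (s2, c2) pair computed there is (sin, cos) of twice the normal's angle,
# used to reflect the incoming gradient about the normal.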
| 327
| 0
|
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Union[str, Any] = np.inf
def set_batch_size(_lowercase ) -> None:
nonlocal batch_size
if isinstance(_lowercase , _lowercase ):
UpperCAmelCase : int = min(_lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_lowercase , _lowercase ):
UpperCAmelCase : Tuple = min(_lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_lowercase , _lowercase ) and feature.dtype == "binary":
UpperCAmelCase : Optional[Any] = min(_lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_lowercase , _lowercase )
return None if batch_size is np.inf else batch_size
class UpperCamelCase_ ( UpperCamelCase_ ):
def __init__( self , A , A = None , A = None , A = None , A = False , A = False , A = None , **A , ) -> int:
super().__init__(
UpperCamelCase__ , split=UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ , streaming=UpperCamelCase__ , num_proc=UpperCamelCase__ , **UpperCamelCase__ , )
UpperCAmelCase : Optional[int] = path_or_paths if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else {self.split: path_or_paths}
UpperCAmelCase : Union[str, Any] = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
UpperCAmelCase : Optional[Any] = Parquet(
cache_dir=UpperCamelCase__ , data_files=UpperCamelCase__ , features=UpperCamelCase__ , hash=UpperCamelCase__ , **UpperCamelCase__ , )
def _lowercase( self ) -> Dict:
if self.streaming:
UpperCAmelCase : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCAmelCase : Tuple = None
UpperCAmelCase : Any = None
UpperCAmelCase : str = None
UpperCAmelCase : Optional[Any] = None
self.builder.download_and_prepare(
download_config=UpperCamelCase__ , download_mode=UpperCamelCase__ , verification_mode=UpperCamelCase__ , base_path=UpperCamelCase__ , num_proc=self.num_proc , )
UpperCAmelCase : str = self.builder.as_dataset(
split=self.split , verification_mode=UpperCamelCase__ , in_memory=self.keep_in_memory )
return dataset
class UpperCamelCase_ :
def __init__( self , A , A , A = None , **A , ) -> str:
UpperCAmelCase : Optional[Any] = dataset
UpperCAmelCase : Any = path_or_buf
UpperCAmelCase : Any = batch_size or get_writer_batch_size(dataset.features )
UpperCAmelCase : Tuple = parquet_writer_kwargs
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Optional[int] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , """wb+""" ) as buffer:
UpperCAmelCase : Optional[int] = self._write(file_obj=UpperCamelCase__ , batch_size=UpperCamelCase__ , **self.parquet_writer_kwargs )
else:
UpperCAmelCase : List[Any] = self._write(file_obj=self.path_or_buf , batch_size=UpperCamelCase__ , **self.parquet_writer_kwargs )
return written
def _lowercase( self , A , A , **A ) -> str:
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : str = parquet_writer_kwargs.pop("""path_or_buf""" , UpperCamelCase__ )
UpperCAmelCase : List[Any] = self.dataset.features.arrow_schema
UpperCAmelCase : List[Any] = pq.ParquetWriter(UpperCamelCase__ , schema=UpperCamelCase__ , **UpperCamelCase__ )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCamelCase__ ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating parquet from Arrow format""" , ):
UpperCAmelCase : List[Any] = query_table(
table=self.dataset._data , key=slice(UpperCamelCase__ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCamelCase__ )
written += batch.nbytes
writer.close()
return written
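# Hypothetical usage sketch (the classes above are this file's aliases for what
# `datasets` exposes as ParquetDatasetReader / ParquetDatasetWriter):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   ds.to_parquet("out.parquet")                # wraps the writer above
#   ds2 = Dataset.from_parquet("out.parquet")   # wraps the reader above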
| 712
|
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
a : str = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
a : Dict = """
Args:
    predictions (`list` of `float`): Predicted values, as returned by a model.
    references (`list` of `float`): Ground truth values.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
a : Optional[int] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def _lowercase( self , A , A , A=False ) -> int:
if return_pvalue:
UpperCAmelCase : int = pearsonr(A , A )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(A , A )[0] )}
| 672
| 0
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase_ = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model (CONFIG_NAME / WEIGHTS_NAME are the standard transformers
    # file-name constants, assumed to be imported at the top of this module)
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Resolve shortcut names for the PyTorch checkpoint to cached files
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save TensorFlow model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
lowercase_ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
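    # Illustrative invocation (a sketch: the script file name and the paths below
    # are hypothetical, not taken from this module):
    #   python convert_pytorch_checkpoint_to_tf2.py \
    #       --model_type gpt2 \
    #       --pytorch_checkpoint_path ./gpt2_pt_checkpoint.bin \
    #       --config_file ./gpt2_config.json \
    #       --tf_dump_path ./tf_dump \
    #       --compare_with_pt_model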
| 354
|
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        # Column-wise sum of the resources currently allocated to all processes.
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        # need[i] = maximum claim of process i minus its current allocation.
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
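    # Illustrative run (a sketch of assumed intended usage; `describe=True`
    # triggers the pretty-print path via the truthy-kwarg check in `main`):
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)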
| 354
| 1
|
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image
def to_atuple(x):
    # Normalize a size argument to a 2-tuple (the usual `to_2tuple` helper).
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, fx_model, pt_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 711
|
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI operates correctly.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """
    Test case for verifying that `accelerate tpu-config` forwards the right command to gcloud.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
| 618
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
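# Illustrative usage (a sketch, not part of the original file): instantiate with
# the defaults above or override individual fields, e.g.
#   config = FocalNetConfig(embed_dim=128, depths=[2, 2, 18, 2])
#   config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']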
| 511
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
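if __name__ == "__main__":
    # Minimal demonstration (an added sketch, not part of the original module):
    # p(x) = 3x^2 + 2x + 1, with coefficients ordered from the x^0 term upward.
    p = Polynomial(2, [1, 2, 3])
    print(p)               # 3x^2 + 2x + 1
    print(p.evaluate(2))   # 3*4 + 2*2 + 1 = 17
    print(p.derivative())  # 6x + 2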
| 511
| 1
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
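# Note: instantiating DeiTFeatureExtractor keeps working (it simply subclasses
# DeiTImageProcessor) but emits the FutureWarning above; new code should use
# DeiTImageProcessor directly.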
| 443
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
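# These values are consumed by the documentation tooling: the install cell is
# prepended to generated notebooks, and the placeholder patterns are excluded
# from code formatting. (Explanatory comment; the exact consumer is an
# assumption based on the variable names.)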
| 443
| 1
|
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
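# This dummy keeps `OnnxRuntimeModel` importable when the `onnx` backend is not
# installed; any attempt to construct or load it raises a helpful ImportError via
# `requires_backends`. (Comment added; the owning package is not shown in this row.)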
| 593
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
    model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
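# Illustrative call pattern (a sketch; `fsdp_plugin`, `accelerator`, `model` and
# `optimizer` are assumed to come from an `accelerate` FSDP training setup):
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt_dir")
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt_dir")
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt_dir")
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt_dir")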
| 593
| 1
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True),
            code,
        )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
| 608
|
import math
def decimal_to_octal(num: int) -> str:
    """Convert a base-10 integer to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # Append the next base-8 digit at the current decimal position.
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
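# Worked example of the loop above (added for clarity):
#   65 // 8 = 8 remainder 1  -> least-significant octal digit 1
#    8 // 8 = 1 remainder 0  -> next digit 0
#    1 // 8 = 0 remainder 1  -> most-significant digit 1
# hence decimal_to_octal(65) == "0o101".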
def main() -> None:
    """Print octal conversions of a few sample numbers."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")
if __name__ == "__main__":
main()
| 608
| 1
|
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFTransfoXLForSequenceClassification,
        TFTransfoXLLMHeadModel,
        TFTransfoXLModel,
    )
class TFTransfoXLModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}

        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_keras_fit(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 314
|
def find_min(arr: list[int]) -> int:
    """Return the minimum difference between the sums of two subsets of `arr`."""
    n = len(arr)
    s = sum(arr)

    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    # A zero sum is always reachable; no positive sum is reachable with no items.
    for i in range(n + 1):
        dp[i][0] = True
    for j in range(1, s + 1):
        dp[0][j] = False

    # dp[i][j] is True when some subset of the first i items sums to exactly j.
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    # The best split puts as close to half of the total as possible on one side.
    diff = s
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
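

# Example (a minimal sketch): for [1, 6, 11, 5] the best split is {6, 5} vs {1, 11},
# giving subset sums 11 and 12, so the minimum difference is 1.
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # 1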
| 67
| 0
|
"""simple docstring"""
import copy
import re
class a__ :
snake_case_ = "hp"
snake_case_ = {}
snake_case_ = None
@classmethod
def snake_case__ ( cls, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = prefix
lowercase__ = defaults
cls.build_naming_info()
@staticmethod
def snake_case__ ( _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return ""
lowercase__ = None
if any(char.isdigit() for char in word ):
raise Exception(F'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(_UpperCAmelCase ) + 1 ):
lowercase__ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
lowercase__ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(_UpperCAmelCase ):
lowercase__ = ""
while integer != 0:
lowercase__ = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
lowercase__ = 0
while True:
lowercase__ = word + "#" + int_to_alphabetic(_UpperCAmelCase )
if sword in info["reverse_short_word"]:
continue
else:
lowercase__ = sword
break
lowercase__ = short_word
lowercase__ = word
return short_word
@staticmethod
def snake_case__ ( _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = param_name.split("_" )
lowercase__ = [TrialShortNamer.shortname_for_word(_UpperCAmelCase, _UpperCAmelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
lowercase__ = ["", "_"]
for separator in separators:
lowercase__ = separator.join(_UpperCAmelCase )
if shortname not in info["reverse_short_param"]:
lowercase__ = shortname
lowercase__ = param_name
return shortname
return param_name
@staticmethod
def snake_case__ ( _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = TrialShortNamer.shortname_for_key(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = short_name
lowercase__ = param_name
@classmethod
def snake_case__ ( cls ):
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
lowercase__ = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
lowercase__ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = info
@classmethod
def snake_case__ ( cls, _UpperCAmelCase ):
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
lowercase__ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
lowercase__ = cls.NAMING_INFO["short_param"][k]
if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
lowercase__ = 1 if v else 0
lowercase__ = "" if isinstance(_UpperCAmelCase, (int, float) ) else "-"
lowercase__ = F'''{key}{sep}{v}'''
name.append(_UpperCAmelCase )
return "_".join(_UpperCAmelCase )
@classmethod
def snake_case__ ( cls, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
lowercase__ = []
else:
lowercase__ = repr.split("_" )
lowercase__ = {}
for value in values:
if "-" in value:
lowercase__ , lowercase__ = value.split("-" )
else:
lowercase__ = re.sub("[0-9.]", "", _UpperCAmelCase )
lowercase__ = float(re.sub("[^0-9.]", "", _UpperCAmelCase ) )
lowercase__ = cls.NAMING_INFO["reverse_short_param"][p_k]
lowercase__ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
lowercase__ = cls.DEFAULTS[k]
return parameters
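

# Example usage (a minimal sketch; DemoNamer and its defaults are hypothetical):
class DemoNamer(TrialShortNamer):
    PREFIX = "demo"
    DEFAULTS = {"learning_rate": 0.1, "warmup": 5}


# DemoNamer.shortname({"learning_rate": 0.1, "warmup": 10}) -> "demo_w10"
# (learning_rate equals its default, so it is omitted from the name)
# DemoNamer.parse_repr("demo_w10") -> {"warmup": 10.0, "learning_rate": 0.1}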
| 668
|
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 668
| 1
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
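

# Example (a minimal sketch; the target path and params here are illustrative):
# instantiate_from_config resolves a dotted "target" path with get_obj_from_str
# and instantiates it with "params" -- the same mechanism the model configs use.
#
#   demo_config = {"target": "datetime.timedelta", "params": {"days": 1}}
#   delta = instantiate_from_config(demo_config)  # datetime.timedelta(days=1)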
| 542
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 1
|
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    """Download a dataset using the datasets package and save it in one-sentence-per-line format."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
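

# Example invocation (a sketch; the script filename is whatever this file is saved as):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# fire maps each --flag onto the matching keyword argument of download_wmt_dataset.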
| 714
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """Tests for `FeaturesManager.determine_framework`."""

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 97
| 0
|
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 26
|
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any]=None ) -> Optional[int]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__magic_name__ , __magic_name__ , sample_weight=__magic_name__ ) ),
}
| 26
| 1
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
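

# Example invocation (a sketch; the checkpoint and config paths are hypothetical):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert/pytorch_model.bin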
| 589
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 589
| 1
|
"""
Count hybrid-integers: numbers of the form p^q * q^p, where p and q are
distinct primes, that do not exceed base^degree.
"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all prime numbers below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid-integers p^q * q^p that are less than or equal to base^degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 577
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A transformer block that attends over the temporal axis of video-shaped latents."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input: fold frames out of the batch, normalize, and flatten spatial positions
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )
        # 3. Output: project back, unfold frames, and add the residual
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
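

# Shape sketch (illustrative dims, not from a real pipeline config): frames are
# folded into the batch, attention runs over the time axis at each spatial
# location, and the residual restores (batch * num_frames, C, H, W).
#
#   model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=32)
#   frames = torch.randn(2 * 4, 32, 8, 8)  # batch=2, num_frames=4
#   out = model(frames, num_frames=4).sample  # -> torch.Size([8, 32, 8, 8])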
| 577
| 1
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
a__ = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def _UpperCAmelCase ( a : int , a : Tuple , a : Optional[Any] , a : List[str] , a : List[str]=False , a : List[Any]=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
snake_case__ , snake_case__ , snake_case__ , snake_case__ = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
snake_case__ = cached_file(a , a , force_download=not use_cached_models )
snake_case__ = config_class.from_json_file(a )
snake_case__ = True
snake_case__ = True
print(F'''Building TensorFlow model from configuration: {config}''' )
snake_case__ = model_class(a )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
snake_case__ = cached_file(
a , a , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
snake_case__ = load_pytorch_checkpoint_in_tfa_model(a , a )
if compare_with_pt_model:
snake_case__ = tf_model(tf_model.dummy_inputs , training=a ) # build the network
snake_case__ = torch.load(a , map_location="""cpu""" )
snake_case__ = pt_model_class.from_pretrained(
pretrained_model_name_or_path=a , config=a , state_dict=a )
with torch.no_grad():
snake_case__ = pt_model(**pt_model.dummy_inputs )
snake_case__ = pto[0].numpy()
snake_case__ = tfo[0].numpy()
snake_case__ = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, F'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(a , save_format="""h5""" )
def _UpperCAmelCase ( a : Dict , a : int , a : str=None , a : int=None , a : str=False , a : Any=False , a : str=False , a : List[str]=False , ):
if args_model_type is None:
snake_case__ = list(MODEL_CLASSES.keys() )
else:
snake_case__ = [args_model_type]
for j, model_type in enumerate(a , start=1 ):
print("""=""" * 100 )
print(F''' Converting model type {j}/{len(a )}: {model_type}''' )
print("""=""" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
snake_case__ = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
snake_case__ = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(a , a ) , start=1 ):
print("""-""" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
snake_case__ = model_shortcut_name
elif only_convert_finetuned_models:
print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(a )}: {model_shortcut_name} - model_type {model_type}''' )
print("""-""" * 100 )
if config_shortcut_name in aws_config_map:
snake_case__ = cached_file(a , a , force_download=not use_cached_models )
else:
snake_case__ = config_shortcut_name
if model_shortcut_name in aws_model_maps:
snake_case__ = cached_file(a , a , force_download=not use_cached_models )
else:
snake_case__ = model_shortcut_name
if os.path.isfile(a ):
snake_case__ = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=a , pytorch_checkpoint_path=a , config_file=a , tf_dump_path=os.path.join(a , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=a , )
if remove_cached_files:
os.remove(a )
os.remove(a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
a__ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 99
|
import requests
from bs4 import BeautifulSoup
def _UpperCAmelCase ( a : str , a : dict ):
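# Fetch the Google Scholar lookup page for the given query parameters, locate
# the first result block (div class "gs_ri"), and return the text of the third
# anchor in its footer row (div class "gs_fl"); on Scholar this is typically
# the "Cited by N" link.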
snake_case__ = BeautifulSoup(requests.get(a , params=a ).content , """html.parser""" )
snake_case__ = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
snake_case__ = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
if __name__ == "__main__":
a__ = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 3_0,
"""pages""": """3979-3990""",
"""year""": 2_0_1_8,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 99
| 1
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ('foo.json',)] )
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : str ):
_A = GenerationConfig(
do_sample=_UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_UpperCAmelCase , config_name=_UpperCAmelCase )
_A = GenerationConfig.from_pretrained(_UpperCAmelCase , config_name=_UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , _UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = AutoConfig.from_pretrained('gpt2' )
_A = GenerationConfig.from_model_config(_UpperCAmelCase )
_A = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCAmelCase_ ( self : Dict ):
_A = GenerationConfig()
_A = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
_A = copy.deepcopy(_UpperCAmelCase )
_A = generation_config.update(**_UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_UpperCAmelCase , {'foo': 'bar'} )
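# In short: GenerationConfig.update(**kwargs) sets the recognized attributes in
# place, returns the unrecognized kwargs as a dict, and never mutates the dict
# the caller passed in.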
def lowerCAmelCase_ ( self : str ):
_A = GenerationConfig()
_A = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(_UpperCAmelCase )
_A = GenerationConfig.from_pretrained(_UpperCAmelCase )
# a custom (non-default) attribute set on the config survives a save/load round-trip
self.assertEqual(new_config.foo , 'bar' )
_A = GenerationConfig.from_model_config(_UpperCAmelCase )
assert not hasattr(_UpperCAmelCase , 'foo' ) # no new kwargs should be initialized if from config
def lowerCAmelCase_ ( self : Any ):
_A = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , _UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
_A = GenerationConfig(
do_sample=_UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , _UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_UpperCAmelCase )
_A = GenerationConfig.from_pretrained(_UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , _UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowerCAmelCase_ ( cls : Any ):
_A = TOKEN
HfFolder.save_token(_UpperCAmelCase )
@classmethod
def lowerCAmelCase_ ( cls : str ):
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def lowerCAmelCase_ ( self : List[Any] ):
_A = GenerationConfig(
do_sample=_UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
_A = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_UpperCAmelCase , repo_id='test-generation-config' , push_to_hub=_UpperCAmelCase , use_auth_token=self._token )
_A = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
def lowerCAmelCase_ ( self : Any ):
_A = GenerationConfig(
do_sample=_UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
_A = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_UpperCAmelCase , repo_id='valid_org/test-generation-config-org' , push_to_hub=_UpperCAmelCase , use_auth_token=self._token )
_A = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
| 7
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __a ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = "gpt_neo"
SCREAMING_SNAKE_CASE = ["past_key_values"]
SCREAMING_SNAKE_CASE = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : Optional[int] , snake_case_ : int=5_02_57 , snake_case_ : Tuple=20_48 , snake_case_ : Optional[int]=20_48 , snake_case_ : int=24 , snake_case_ : List[Any]=[[["global", "local"], 12]] , snake_case_ : Tuple=16 , snake_case_ : Optional[int]=None , snake_case_ : Dict=2_56 , snake_case_ : List[str]="gelu_new" , snake_case_ : List[Any]=0.0 , snake_case_ : Tuple=0.0 , snake_case_ : Tuple=0.0 , snake_case_ : str=0.1 , snake_case_ : Union[str, Any]=1e-5 , snake_case_ : str=0.0_2 , snake_case_ : Union[str, Any]=True , snake_case_ : Optional[int]=5_02_56 , snake_case_ : Tuple=5_02_56 , **snake_case_ : Optional[int] , )-> Tuple:
__lowerCAmelCase =vocab_size
__lowerCAmelCase =max_position_embeddings
__lowerCAmelCase =hidden_size
__lowerCAmelCase =num_layers
__lowerCAmelCase =num_heads
__lowerCAmelCase =intermediate_size
__lowerCAmelCase =window_size
__lowerCAmelCase =activation_function
__lowerCAmelCase =resid_dropout
__lowerCAmelCase =embed_dropout
__lowerCAmelCase =attention_dropout
__lowerCAmelCase =classifier_dropout
__lowerCAmelCase =layer_norm_epsilon
__lowerCAmelCase =initializer_range
__lowerCAmelCase =use_cache
__lowerCAmelCase =bos_token_id
__lowerCAmelCase =eos_token_id
__lowerCAmelCase =attention_types
__lowerCAmelCase =self.expand_attention_types_params(snake_case_)
if len(self.attention_layers) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F"""but is `len(config.attention_layers) = {len(self.attention_layers)}`, """
F"""`config.num_layers = {self.num_layers}`. """
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""")
super().__init__(bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_)
@staticmethod
def UpperCamelCase ( snake_case_ : Optional[int])-> Any:
__lowerCAmelCase =[]
for item in attention_types:
for _ in range(item[1]):
attentions.extend(item[0])
return attentions
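# For example, the default attention_types=[[["global", "local"], 12]] expands
# to ["global", "local"] extended 12 times, i.e. 24 per-layer entries that
# alternate between global and local attention, matching the default
# num_layers=24 checked in __init__ above.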
def __lowerCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ) -> int:
import torch
__lowerCAmelCase =input.size()
__lowerCAmelCase =len(__lowerCamelCase )
__lowerCAmelCase =shape[dimension]
__lowerCAmelCase =torch.arange(0 , __lowerCamelCase , __lowerCamelCase )
__lowerCAmelCase =torch.div(sizedim - size , __lowerCamelCase , rounding_mode="""floor""" ) + 1
__lowerCAmelCase =torch.arange(__lowerCamelCase ) + low_indices[:min_length][:, None]
__lowerCAmelCase =[slice(__lowerCamelCase )] * rank
__lowerCAmelCase =indices
__lowerCAmelCase =input[s]
__lowerCAmelCase =list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(__lowerCamelCase )
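# The helper above appears to unfold a tensor into overlapping windows of
# length `size` taken every `step` elements along `dimension`. A minimal
# equivalent sketch using torch.unfold (semantics inferred from the code):
#
#   import torch
#   x = torch.arange(10).unsqueeze(0)   # shape (1, 10)
#   y = x.unfold(1, 4, 2)               # windows of 4 every 2 steps -> (1, 4, 4)
#   assert y.shape == (1, 4, 4)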
def __lowerCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ) -> List[str]:
import torch
__lowerCAmelCase =torch.arange(1 , __lowerCamelCase )
__lowerCAmelCase =torch.remainder(__lowerCamelCase , __lowerCamelCase )
__lowerCAmelCase =remainders == 0
__lowerCAmelCase =candidates[divisor_indices]
__lowerCAmelCase =torch.max(__lowerCamelCase )
return largest_divisor, torch.div(__lowerCamelCase , __lowerCamelCase , rounding_mode="""floor""" )
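# Assuming the call order (upper_bound, n), which the obfuscated parameter
# names leave ambiguous, the helper returns the largest value below the first
# argument that evenly divides the second, together with the quotient: e.g.
# for (8, 10) the divisors of 10 under 8 are {1, 2, 5}, so it yields 5 and
# 10 // 5 = 2.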
class __a ( SCREAMING_SNAKE_CASE ):
@property
def UpperCamelCase ( self : Union[str, Any])-> Mapping[str, Mapping[int, str]]:
__lowerCAmelCase =OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}})
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction="""inputs""")
__lowerCAmelCase ={0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase ={0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase ( self : Optional[int])-> int:
return self._config.num_heads
def UpperCamelCase ( self : int , snake_case_ : PreTrainedTokenizer , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , )-> Mapping[str, Any]:
__lowerCAmelCase =super(snake_case_ , self).generate_dummy_inputs(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_)
# We need to order the inputs in the way they appear in the forward()
__lowerCAmelCase =OrderedDict({"""input_ids""": common_inputs["""input_ids"""]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
else:
import torch
__lowerCAmelCase , __lowerCAmelCase =common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase =seqlen + 2
__lowerCAmelCase =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase =[
(torch.zeros(snake_case_), torch.zeros(snake_case_)) for _ in range(self.num_layers)
]
__lowerCAmelCase =common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase =ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase =torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(snake_case_ , snake_case_ , dtype=snake_case_)] , dim=1)
return ordered_inputs
@property
def UpperCamelCase ( self : Union[str, Any])-> int:
return 13
| 354
| 0
|
import numpy as np
import qiskit
def __UpperCAmelCase ( __a : int = 8 ,__a : int | None = None ) -> Any:
"""simple docstring"""
_a : Optional[Any] = np.random.default_rng(seed=_lowerCamelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
_a : Optional[Any] = 6 * key_len
# Measurement basis for Alice's qubits.
_a : List[str] = rng.integers(2 ,size=_lowerCamelCase )
# The set of states Alice will prepare.
_a : str = rng.integers(2 ,size=_lowerCamelCase )
# Measurement basis for Bob's qubits.
_a : List[str] = rng.integers(2 ,size=_lowerCamelCase )
# Quantum Circuit to simulate BB84
_a : Tuple = qiskit.QuantumCircuit(_lowerCamelCase ,name='''BB84''' )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_lowerCamelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_a : Optional[Any] = qiskit.Aer.get_backend('''aer_simulator''' )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
_a : Tuple = qiskit.execute(_lowerCamelCase ,_lowerCamelCase ,shots=1 ,seed_simulator=_lowerCamelCase )
# Returns the result of measurement.
_a : Optional[int] = job.result().get_counts(_lowerCamelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
_a : Any = "".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
_a : Optional[int] = gen_key[:key_len] if len(_lowerCamelCase ) >= key_len else gen_key.ljust(_lowerCamelCase ,'''0''' )
return key
if __name__ == "__main__":
print(f'''The generated key is: {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
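# A classical sketch of the sifting rule used above, without qiskit: only the
# positions where Alice's and Bob's random bases coincide contribute key bits,
# which is why the circuit oversamples with 6 * key_len qubits.
#
#   import numpy as np
#   rng = np.random.default_rng(0)
#   alice_basis = rng.integers(2, size=48)
#   bob_basis = rng.integers(2, size=48)
#   bits = rng.integers(2, size=48)  # stand-in for the measured qubit values
#   sifted = "".join(str(b) for x, y, b in zip(alice_basis, bob_basis, bits) if x == y)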
| 704
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
a__ = datasets.utils.logging.get_logger(__name__)
a__ = ['''names''', '''prefix''']
a__ = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
a__ = ['''encoding_errors''', '''on_bad_lines''']
a__ = ['''date_format''']
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase__ : str = ","
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : Optional[Union[int, List[int], str]] = "infer"
UpperCAmelCase__ : Optional[List[str]] = None
UpperCAmelCase__ : Optional[List[str]] = None
UpperCAmelCase__ : Optional[Union[int, str, List[int], List[str]]] = None
UpperCAmelCase__ : Optional[Union[List[int], List[str]]] = None
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : Optional[Literal["c", "python", "pyarrow"]] = None
UpperCAmelCase__ : Dict[Union[int, str], Callable[[Any], Any]] = None
UpperCAmelCase__ : Optional[list] = None
UpperCAmelCase__ : Optional[list] = None
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : Optional[Union[int, List[int]]] = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Optional[Union[str, List[str]]] = None
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : str = "."
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : str = '"'
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : int = 10000
UpperCAmelCase__ : Optional[datasets.Features] = None
UpperCAmelCase__ : Optional[str] = "strict"
UpperCAmelCase__ : Literal["error", "warn", "skip"] = "error"
UpperCAmelCase__ : Optional[str] = None
def __lowercase ( self ) -> Union[str, Any]:
if self.delimiter is not None:
_a : str = self.delimiter
if self.column_names is not None:
_a : Optional[int] = self.column_names
@property
def __lowercase ( self ) -> Any:
_a : int = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value;
# others are deprecated, and we likewise must not pass them when left at their default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , _a ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
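# Note on the property above: it mirrors the pandas.read_csv keyword surface,
# then prunes keys the installed pandas cannot accept. No-default or deprecated
# keys are dropped when left at their defaults, the 2.0-only `date_format` is
# dropped on pandas < 2, and the 1.3+ `encoding_errors` / `on_bad_lines` are
# dropped on pandas < 1.3.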
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = CsvConfig
def __lowercase ( self ) -> List[str]:
return datasets.DatasetInfo(features=self.config.features )
def __lowercase ( self , _a ) -> int:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_a : Optional[int] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a , (str, list, tuple) ):
_a : Optional[int] = data_files
if isinstance(_a , _a ):
_a : Union[str, Any] = [files]
_a : Any = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
_a : List[str] = []
for split_name, files in data_files.items():
if isinstance(_a , _a ):
_a : List[Any] = [files]
_a : Tuple = [dl_manager.iter_files(_a ) for file in files]
splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={'''files''': files} ) )
return splits
def __lowercase ( self , _a ) -> pa.Table:
if self.config.features is not None:
_a : Tuple = self.config.features.arrow_schema
if all(not require_storage_cast(_a ) for feature in self.config.features.values() ):
# cheaper cast
_a : Dict = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=_a )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_a : int = table_cast(_a , _a )
return pa_table
def __lowercase ( self , _a ) -> Union[str, Any]:
_a : List[Any] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_a : Any = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(_a ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ):
_a : Any = pd.read_csv(_a , iterator=_a , dtype=_a , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(_a ):
_a : Optional[Any] = pa.Table.from_pandas(_a )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_a )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(_a )}: {e}""" )
raise
| 578
| 0
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : Dict ="codegen"
lowerCamelCase__ : Dict ={
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowerCamelCase=50400 , lowerCamelCase=2048 , lowerCamelCase=2048 , lowerCamelCase=4096 , lowerCamelCase=28 , lowerCamelCase=16 , lowerCamelCase=64 , lowerCamelCase=None , lowerCamelCase="gelu_new" , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=1e-5 , lowerCamelCase=0.0_2 , lowerCamelCase=True , lowerCamelCase=50256 , lowerCamelCase=50256 , lowerCamelCase=False , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
__magic_name__ : List[Any] = vocab_size
__magic_name__ : List[Any] = n_ctx
__magic_name__ : List[Any] = n_positions
__magic_name__ : Optional[int] = n_embd
__magic_name__ : Tuple = n_layer
__magic_name__ : Optional[int] = n_head
__magic_name__ : List[Any] = n_inner
__magic_name__ : Optional[Any] = rotary_dim
__magic_name__ : Optional[int] = activation_function
__magic_name__ : Optional[int] = resid_pdrop
__magic_name__ : Tuple = embd_pdrop
__magic_name__ : List[str] = attn_pdrop
__magic_name__ : Dict = layer_norm_epsilon
__magic_name__ : List[str] = initializer_range
__magic_name__ : Tuple = use_cache
__magic_name__ : Dict = bos_token_id
__magic_name__ : str = eos_token_id
super().__init__(
bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , tie_word_embeddings=lowerCamelCase , **lowerCamelCase )
class A__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , lowerCamelCase , lowerCamelCase = "default" , lowerCamelCase = None , lowerCamelCase = False , ) -> Any:
"""simple docstring"""
super().__init__(lowerCamelCase , task=lowerCamelCase , patching_specs=lowerCamelCase , use_past=lowerCamelCase )
if not getattr(self._config , '''pad_token_id''' , lowerCamelCase ):
# TODO: how to do that better?
__magic_name__ : int = 0
@property
def lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__magic_name__ : List[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction='''inputs''' )
__magic_name__ : List[str] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__magic_name__ : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def lowercase ( self ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def lowercase ( self ) -> int:
"""simple docstring"""
return self._config.n_head
def lowercase ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__magic_name__ : Union[str, Any] = super(lowerCamelCase , self ).generate_dummy_inputs(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
# We need to order the inputs in the way they appear in the forward()
__magic_name__ : Optional[int] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__magic_name__ , __magic_name__ : List[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__magic_name__ : Union[str, Any] = seqlen + 2
__magic_name__ : Union[str, Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__magic_name__ : Any = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(self.num_layers )
]
__magic_name__ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
__magic_name__ : Any = ordered_inputs['''attention_mask'''].dtype
__magic_name__ : List[str] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
return ordered_inputs
@property
def lowercase ( self ) -> int:
"""simple docstring"""
return 13
| 154
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : Any ="openai/whisper-base"
lowerCamelCase__ : Any =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
lowerCamelCase__ : Union[str, Any] ="transcriber"
lowerCamelCase__ : List[str] =WhisperProcessor
lowerCamelCase__ : Tuple =WhisperForConditionalGeneration
lowerCamelCase__ : Tuple =["audio"]
lowerCamelCase__ : List[str] =["text"]
def lowercase ( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return self.pre_processor(lowerCamelCase , return_tensors='''pt''' ).input_features
def lowercase ( self , lowerCamelCase ) -> Dict:
"""simple docstring"""
return self.model.generate(inputs=lowerCamelCase )
def lowercase ( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return self.pre_processor.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )[0]
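# Usage note: PipelineTool chains the three hooks above as
# encode -> forward -> decode, so invoking the "transcriber" tool maps raw
# audio to Whisper input features, to generated token ids, to the final
# transcribed string.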
| 154
| 1
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> int:
torch.manual_seed(0 )
A : Optional[int] =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
A : Dict =self.dummy_uncond_unet
A : int =ScoreSdeVeScheduler()
A : Union[str, Any] =ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
sde_ve.to(SCREAMING_SNAKE_CASE__ )
sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =torch.manual_seed(0 )
A : Dict =sde_ve(num_inference_steps=2 , output_type='numpy' , generator=SCREAMING_SNAKE_CASE__ ).images
A : List[Any] =torch.manual_seed(0 )
A : List[str] =sde_ve(num_inference_steps=2 , output_type='numpy' , generator=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )[
0
]
A : List[Any] =image[0, -3:, -3:, -1]
A : List[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : str =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
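# i.e. with the return-dict flag disabled (the output is indexed as a tuple
# above), the pipeline returns a plain tuple whose first element matches the
# dict-like output's .images field; both slices are checked against the same
# expected values.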
@slow
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Union[str, Any]:
A : int ='google/ncsnpp-church-256'
A : Dict =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : str =ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : str =ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
sde_ve.to(SCREAMING_SNAKE_CASE__ )
sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Any =torch.manual_seed(0 )
A : List[str] =sde_ve(num_inference_steps=10 , output_type='numpy' , generator=SCREAMING_SNAKE_CASE__ ).images
A : Dict =image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
A : Dict =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 661
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Dict ={
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Optional[int] = "xglm"
lowercase : Any = ["past_key_values"]
lowercase : Dict = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=25_60_08 , SCREAMING_SNAKE_CASE__ : Dict=20_48 , SCREAMING_SNAKE_CASE__ : List[Any]=10_24 , SCREAMING_SNAKE_CASE__ : str=40_96 , SCREAMING_SNAKE_CASE__ : Optional[int]=24 , SCREAMING_SNAKE_CASE__ : Optional[Any]=16 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0_2 , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> int:
A : str =vocab_size
A : Union[str, Any] =max_position_embeddings
A : Optional[Any] =d_model
A : Optional[int] =ffn_dim
A : int =num_layers
A : Any =attention_heads
A : Dict =activation_function
A : List[Any] =dropout
A : str =attention_dropout
A : List[Any] =activation_dropout
A : List[Any] =layerdrop
A : List[Any] =init_std
A : Union[str, Any] =scale_embedding # scale factor will be sqrt(d_model) if True
A : List[str] =use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
| 661
| 1
|
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def __lowercase ( _a ):
snake_case_ : Optional[int] = VideoMAEConfig()
set_architecture_configs(_a , _a )
if "finetuned" not in model_name:
snake_case_ : int = False
if "finetuned" in model_name:
snake_case_ : List[Any] = '''huggingface/label-files'''
if "kinetics" in model_name:
snake_case_ : Optional[int] = 400
snake_case_ : List[Any] = '''kinetics400-id2label.json'''
elif "ssv2" in model_name:
snake_case_ : int = 174
snake_case_ : Dict = '''something-something-v2-id2label.json'''
else:
raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
snake_case_ : Union[str, Any] = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ : Any = {int(_a ): v for k, v in idalabel.items()}
snake_case_ : Optional[Any] = idalabel
snake_case_ : str = {v: k for k, v in idalabel.items()}
return config
def __lowercase ( _a , _a ):
if "small" in model_name:
snake_case_ : Optional[Any] = 384
snake_case_ : Optional[Any] = 1_536
snake_case_ : Tuple = 12
snake_case_ : List[str] = 16
snake_case_ : int = 12
snake_case_ : Union[str, Any] = 3
snake_case_ : Any = 192
snake_case_ : Any = 768
elif "large" in model_name:
snake_case_ : Any = 1_024
snake_case_ : Optional[int] = 4_096
snake_case_ : Optional[Any] = 24
snake_case_ : Optional[int] = 16
snake_case_ : Optional[Any] = 12
snake_case_ : Any = 8
snake_case_ : Optional[int] = 512
snake_case_ : List[Any] = 2_048
elif "huge" in model_name:
snake_case_ : int = 1_280
snake_case_ : Tuple = 5_120
snake_case_ : str = 32
snake_case_ : Dict = 16
snake_case_ : List[Any] = 12
snake_case_ : Union[str, Any] = 8
snake_case_ : str = 640
snake_case_ : Optional[int] = 2_560
elif "base" not in model_name:
raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' )
def __lowercase ( _a ):
if "encoder." in name:
snake_case_ : int = name.replace('''encoder.''' , '''''' )
if "cls_token" in name:
snake_case_ : Dict = name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
snake_case_ : Optional[Any] = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
snake_case_ : Tuple = name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
snake_case_ : Optional[int] = name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
snake_case_ : str = name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
snake_case_ : Union[str, Any] = name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
snake_case_ : Optional[int] = name.replace('''blocks''' , '''videomae.encoder.layer''' )
if "attn.proj" in name:
snake_case_ : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "bias" not in name:
snake_case_ : Tuple = name.replace('''attn''' , '''attention.self''' )
if "attn" in name:
snake_case_ : Any = name.replace('''attn''' , '''attention.attention''' )
if "norm1" in name:
snake_case_ : Optional[Any] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
snake_case_ : str = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
snake_case_ : str = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
snake_case_ : Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
snake_case_ : Union[str, Any] = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
snake_case_ : Optional[Any] = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
snake_case_ : str = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
snake_case_ : Dict = name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
snake_case_ : Union[str, Any] = name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
snake_case_ : str = name.replace('''head''' , '''classifier''' )
return name
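# Worked example of the renaming chain above:
#   "encoder.blocks.0.attn.proj.weight"
#     -> "blocks.0.attn.proj.weight"                  (strip "encoder.")
#     -> "videomae.encoder.layer.0.attn.proj.weight"  ("blocks" rule)
#     -> "videomae.encoder.layer.0.attention.output.dense.weight"  ("attn.proj" rule)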
def __lowercase ( _a , _a ):
for key in orig_state_dict.copy().keys():
snake_case_ : Union[str, Any] = orig_state_dict.pop(_a )
if key.startswith('''encoder.''' ):
snake_case_ : Union[str, Any] = key.replace('''encoder.''' , '''''' )
if "qkv" in key:
snake_case_ : Optional[int] = key.split('''.''' )
if key.startswith('''decoder.blocks''' ):
snake_case_ : int = config.decoder_hidden_size
snake_case_ : List[str] = int(key_split[2] )
snake_case_ : Tuple = '''decoder.decoder_layers.'''
if "weight" in key:
snake_case_ : Dict = val[:dim, :]
snake_case_ : Union[str, Any] = val[dim : dim * 2, :]
snake_case_ : List[Any] = val[-dim:, :]
else:
snake_case_ : Optional[int] = config.hidden_size
snake_case_ : Any = int(key_split[1] )
snake_case_ : int = '''videomae.encoder.layer.'''
if "weight" in key:
snake_case_ : Any = val[:dim, :]
snake_case_ : Dict = val[dim : dim * 2, :]
snake_case_ : Dict = val[-dim:, :]
else:
snake_case_ : str = val
return orig_state_dict
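# The qkv branch above splits each fused (3 * dim, dim) projection weight into
# equal query / key / value thirds: rows [:dim], [dim : 2 * dim], and [-dim:].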
def __lowercase ( ):
snake_case_ : Optional[int] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
snake_case_ : Optional[Any] = np.load(_a )
return list(_a )
def __lowercase ( _a , _a , _a , _a ):
snake_case_ : Any = get_videomae_config(_a )
if "finetuned" in model_name:
snake_case_ : Tuple = VideoMAEForVideoClassification(_a )
else:
snake_case_ : Optional[Any] = VideoMAEForPreTraining(_a )
# download original checkpoint, hosted on Google Drive
snake_case_ : Union[str, Any] = '''pytorch_model.bin'''
gdown.cached_download(_a , _a , quiet=_a )
snake_case_ : Optional[Any] = torch.load(_a , map_location='''cpu''' )
if "model" in files:
snake_case_ : Tuple = files['''model''']
else:
snake_case_ : Optional[Any] = files['''module''']
snake_case_ : Optional[Any] = convert_state_dict(_a , _a )
model.load_state_dict(_a )
model.eval()
# verify model on basic input
snake_case_ : Optional[int] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
snake_case_ : int = prepare_video()
snake_case_ : Dict = image_processor(_a , return_tensors='''pt''' )
if "finetuned" not in model_name:
snake_case_ : List[Any] = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
snake_case_ : Union[str, Any] = torch.load(_a )
snake_case_ : Tuple = model(**_a )
snake_case_ : Optional[int] = outputs.logits
snake_case_ : Union[str, Any] = [
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case_ : Any = torch.Size([1, 400] )
snake_case_ : int = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case_ : Tuple = torch.Size([1, 174] )
snake_case_ : Optional[Any] = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
snake_case_ : int = torch.Size([1, 1_408, 1_536] )
snake_case_ : Dict = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
snake_case_ : int = torch.Size([1, 1_408, 1_536] )
snake_case_ : Optional[Any] = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case_ : List[str] = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
snake_case_ : List[str] = torch.Size([1, 1_408, 1_536] )
snake_case_ : Optional[int] = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case_ : List[str] = torch.Size([1, 400] )
snake_case_ : int = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case_ : List[str] = torch.Size([1, 400] )
snake_case_ : Optional[Any] = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case_ : Tuple = torch.Size([1, 400] )
snake_case_ : List[str] = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case_ : Tuple = torch.Size([1, 400] )
snake_case_ : str = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
snake_case_ : Optional[Any] = torch.Size([1, 1_408, 1_536] )
snake_case_ : Dict = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case_ : str = torch.Size([1, 174] )
snake_case_ : int = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
snake_case_ : str = torch.Size([1, 1_408, 1_536] )
snake_case_ : Optional[Any] = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case_ : Dict = torch.Size([1, 174] )
snake_case_ : Union[str, Any] = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(f"Model name not supported. Should be one of {model_names}" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , _a , atol=1E-4 )
else:
print('''Logits:''' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , _a , atol=1E-4 )
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
snake_case_ : List[str] = outputs.loss
assert torch.allclose(_a , _a , atol=1E-4 )
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_a )
model.save_pretrained(_a )
if push_to_hub:
print('''Pushing to the hub...''' )
model.push_to_hub(_a , organization='''nielsr''' )
if __name__ == "__main__":
lowercase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase__ : int = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 123
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : List[Any] = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : Any = """gpt_bigcode"""
_lowerCAmelCase : Union[str, Any] = ["""past_key_values"""]
_lowerCAmelCase : List[str] = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : List[str] , lowercase_ : int=50257 , lowercase_ : List[Any]=1024 , lowercase_ : Optional[int]=768 , lowercase_ : List[str]=12 , lowercase_ : Any=12 , lowercase_ : List[str]=None , lowercase_ : Dict="gelu_pytorch_tanh" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Tuple=1E-5 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Union[str, Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Union[str, Any]=50256 , lowercase_ : Any=50256 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Tuple=True , **lowercase_ : List[Any] , ):
snake_case_ : int = vocab_size
snake_case_ : List[Any] = n_positions
snake_case_ : List[Any] = n_embd
snake_case_ : Any = n_layer
snake_case_ : List[Any] = n_head
snake_case_ : Any = n_inner
snake_case_ : Dict = activation_function
snake_case_ : Tuple = resid_pdrop
snake_case_ : Optional[int] = embd_pdrop
snake_case_ : List[str] = attn_pdrop
snake_case_ : str = layer_norm_epsilon
snake_case_ : Tuple = initializer_range
snake_case_ : Optional[int] = scale_attn_weights
snake_case_ : str = use_cache
snake_case_ : Tuple = attention_softmax_in_fpaa
snake_case_ : Optional[int] = scale_attention_softmax_in_fpaa
snake_case_ : Optional[int] = multi_query
snake_case_ : str = bos_token_id
snake_case_ : Union[str, Any] = eos_token_id
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
| 123
| 1
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : str=13 , _UpperCamelCase : List[Any]=7 , _UpperCamelCase : List[Any]=True , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Any=99 , _UpperCamelCase : Union[str, Any]=32 , _UpperCamelCase : List[str]=5 , _UpperCamelCase : Any=4 , _UpperCamelCase : List[Any]=37 , _UpperCamelCase : Optional[Any]="gelu" , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : List[Any]=512 , _UpperCamelCase : Optional[Any]=16 , _UpperCamelCase : int=2 , _UpperCamelCase : Dict=0.0_2 , _UpperCamelCase : Any=3 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : Optional[Any]=None , ) ->Any:
"""simple docstring"""
_lowerCamelCase : Dict = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Optional[Any] = seq_length
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : List[Any] = use_input_mask
_lowerCamelCase : Union[str, Any] = use_token_type_ids
_lowerCamelCase : Optional[Any] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : List[Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Dict = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Union[str, Any] = max_position_embeddings
_lowerCamelCase : Optional[int] = type_vocab_size
_lowerCamelCase : Dict = type_sequence_label_size
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : str = num_labels
_lowerCamelCase : int = num_choices
_lowerCamelCase : Dict = scope
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict:
"""simple docstring"""
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_lowerCamelCase : List[Any] = None
if self.use_input_mask:
_lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length])
_lowerCamelCase : int = None
if self.use_token_type_ids:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_lowerCamelCase : Dict = None
_lowerCamelCase : List[Any] = None
_lowerCamelCase : List[str] = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices)
_lowerCamelCase : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[int] = NystromformerModel(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_lowerCamelCase : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__)
_lowerCamelCase : Any = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__)
_lowerCamelCase : Any = model(UpperCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Any) ->Any:
"""simple docstring"""
_lowerCamelCase : Tuple = NystromformerForMaskedLM(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_lowerCamelCase : Tuple = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : List[Any]) ->str:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = NystromformerForQuestionAnswering(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_lowerCamelCase : int = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _SCREAMING_SNAKE_CASE ( self : str , _UpperCamelCase : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.num_labels
_lowerCamelCase : Any = NystromformerForSequenceClassification(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_lowerCamelCase : Optional[int] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.num_labels
_lowerCamelCase : Optional[Any] = NystromformerForTokenClassification(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_lowerCamelCase : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[Any]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Any = self.num_choices
_lowerCamelCase : List[str] = NystromformerForMultipleChoice(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
_lowerCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_lowerCamelCase : int = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_lowerCamelCase : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_lowerCamelCase : Union[str, Any] = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
_snake_case = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
def _SCREAMING_SNAKE_CASE ( self : int) ->str:
"""simple docstring"""
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self , config_class=NystromformerConfig , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
"""simple docstring"""
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->int:
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : str) ->List[str]:
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : List[Any] = NystromformerModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
"""simple docstring"""
        model = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4_5_3_2, -0.0_9_3_6, 0.5_1_3_7], [-0.2_6_7_6, 0.0_6_2_8, 0.6_1_8_6], [-0.3_6_2_9, -0.1_7_2_6, 0.4_7_1_6]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : Any) ->Tuple:
"""simple docstring"""
        sentence = """the [MASK] of Belgium is Brussels"""
        tokenizer = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""")
        model = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""")
        encoding = tokenizer(sentence , return_tensors="""pt""")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction) , """capital""")
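# Illustrative sketch (added, not part of the test suite): the fill-mask flow
# exercised by the slow test above, wrapped as a standalone helper. Checkpoint
# name and expected answer come straight from the test itself.
def _example_nystromformer_fill_mask():
    import torch
    from transformers import AutoTokenizer, NystromformerForMaskedLM

    tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
    model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
    encoding = tokenizer("the [MASK] of Belgium is Brussels", return_tensors="pt")
    with torch.no_grad():
        logits = model(encoding.input_ids).logits
    # position 2 holds the [MASK] token for this particular sentence
    return tokenizer.decode(logits[:, 2, :].argmax(-1)[0])  # expected: "capital"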
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = ViTImageProcessor if is_vision_available() else None
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
"""simple docstring"""
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        image_processor_map = {
            """do_normalize""": False,
            """do_resize""": True,
            """image_processor_type""": """ViTImageProcessor""",
            """resample""": 3,
            """size""": {"""height""": 32, """width""": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
            json.dump(image_processor_map , fp)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **_UpperCamelCase : Any) ->Tuple:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict , **_UpperCamelCase : Optional[Any]) ->List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
"""simple docstring"""
        image_input = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1))
        return image_input
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor , ViTImageProcessor)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer , MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , ViTImageProcessor)
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors="""np""")
        input_processor = processor(images=image_input , return_tensors="""np""")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
"""simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = """test"""
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = """test"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decode_strs = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decoded_strs = [seq.replace(""" """ , """""") for seq in decoded_tok]
        self.assertListEqual(decode_strs , decoded_strs)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer , image_processor=image_processor)
        char_input = torch.randn(1 , 27 , 38)
        bpe_input = torch.randn(1 , 27 , 5_0257)
        wp_input = torch.randn(1 , 27 , 3_0522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
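# Illustrative sketch (added): end-to-end scene-text recognition with the
# processor tested above. The "alibaba-damo/mgp-str-base" checkpoint and the
# MgpstrForSceneTextRecognition class are the public transformers names; treat
# them as assumptions if your install differs.
def _example_mgpstr_recognition(image):
    import torch
    from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    # batch_decode fuses the char/bpe/wordpiece heads, as the last test checks
    return processor.batch_decode(outputs.logits)["generated_text"]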
from typing import Any
class Node:
    def __init__(self , data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data , end=' ')
            temp = temp.next
        print()

    def push(self , new_data: Any) -> None:
        # Prepend, so the most recently pushed value becomes the new head.
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self , node_data_a , node_data_b) -> None:
        if node_data_a == node_data_b:
            return
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        if node_a is None or node_b is None:
            return
        # Swap only the payloads; the links between nodes stay unchanged.
        node_a.data, node_b.data = node_b.data, node_a.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
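# Note (added): swap_nodes above exchanges only the data payloads of the two
# matching nodes, leaving the links untouched, so it runs in O(n) time and O(1)
# extra space. The demo prints "1 2 3 4 5", then "4 2 3 1 5" after the swap.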
def largest_square_area_in_matrix_top_down_approach(rows: int , cols: int , mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int , col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1)
        diagonal = update_area_of_max_square(row + 1 , col + 1)
        down = update_area_of_max_square(row + 1 , col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0 , 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int , cols: int , mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(
        row: int , col: int , dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int , cols: int , mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1):
        for col in range(cols - 1 , -1 , -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom)
                largest_square_area = max(dp_array[row][col] , largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int , cols: int , mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1):
        for col in range(cols - 1 , -1 , -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom)
                largest_square_area = max(current_row[col] , largest_square_area)
            else:
                current_row[col] = 0
        # Copy, not alias: next_row must snapshot the finished row before the
        # buffer is overwritten while processing the row above.
        next_row = current_row.copy()
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
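# Illustrative self-check (added, not part of the original module): all four
# implementations should agree on any input; for the matrix below the largest
# all-ones square has side 2 (the top-left 2x2 block).
def _cross_check_largest_square() -> None:
    mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    results = {
        largest_square_area_in_matrix_top_down_approach(3, 3, mat),
        largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, mat),
        largest_square_area_in_matrix_bottom_up(3, 3, mat),
        largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, mat),
    }
    assert results == {2}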
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowercase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
def __init__(self , *_lowercase , **_lowercase ):
'''simple docstring'''
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
"""simple docstring"""
def average_absolute_deviation(nums: list[int]) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
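# Worked example (added): for nums = [1, 2, 3, 4] the average is 2.5 and the
# absolute deviations are 1.5, 0.5, 0.5 and 1.5, so
# average_absolute_deviation([1, 2, 3, 4]) == 1.0.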
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "spiece.model"}
lowercase_ = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
lowercase_ = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
__lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__(self , A , A=False , A=False , A=False , A=None , A=None , A=None , A=None , A = None , **A , ) -> None:
"""simple docstring"""
_a = {} if sp_model_kwargs is None else sp_model_kwargs
_a = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
                '''name_or_path not provided; this will work for all GPTSw3 models except gpt-sw3-7b.'''
                ''' If you are testing the model, this can safely be ignored.''' )
_a = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_a = '''<|endoftext|>''' if eos_token is None else eos_token
_a = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_a = unk_token if pad_token is None else pad_token
_a = eos_token if bos_token is None else bos_token
else:
_a = '''<pad>''' if pad_token is None else pad_token
_a = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_a = do_lower_case
_a = remove_space
_a = keep_accents
_a = vocab_file
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
# Used for whitespace normalization in input texts
        # fmt: off
        # NOTE: the entries below are distinct Unicode whitespace variants; most of
        # them render as a plain space (and the zero-width ones as empty) in this dump.
        self.whitespaces = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f'''[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]''' )
def __getstate__(self ) -> Tuple:
"""simple docstring"""
_a = self.__dict__.copy()
_a = None
return state
def __setstate__(self , A ) -> str:
"""simple docstring"""
_a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a = {}
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def a__ (self ) -> int:
"""simple docstring"""
return len(self.sp_model )
    def preprocess_text(self , text ) -> str:
        """simple docstring"""
        text = self.non_printing_characters_re.sub('''''' , text )
        # Normalize whitespaces
        text = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize('''NFC''' , text )
        return text
def a__ (self , A , **A ) -> List[str]:
"""simple docstring"""
_a = self.preprocess_text(A )
return self.sp_model.encode(A , out_type=A )
def a__ (self , A ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(A )
def a__ (self , A ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(A )
@staticmethod
def a__ (A ) -> str:
"""simple docstring"""
return out_string
def a__ (self , A ) -> str:
"""simple docstring"""
_a = []
_a = ''''''
_a = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
_a = True
_a = []
else:
current_sub_tokens.append(A )
_a = False
out_string += self.sp_model.decode(A )
return out_string
def a__ (self ) -> Dict[str, int]:
"""simple docstring"""
_a = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ (self , A , A = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_a = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def a__ (self , A , A = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(A , A ):
_a = self.preprocess_text(A )
_a = self.sp_model.encode(A )
else:
_a = [self.preprocess_text(A ) for t in text]
_a = self.sp_model.encode(A )
if return_tensors is True or return_tensors == "pt":
_a = torch.tensor(A )
return token_ids
def a__ (self , A ) -> str:
"""simple docstring"""
return self.sp_model.decode(A )
def a__ (self , A ) -> List[int]:
"""simple docstring"""
_a = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
_a = (
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(A ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=A )
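# Illustrative sketch (added): encode/decode round trip through the tokenizer
# above. AutoTokenizer resolution and the 126m checkpoint are assumptions taken
# from the PRETRAINED map at the top of this file.
def _example_gpt_sw3_roundtrip(text):
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
    token_ids = tokenizer(text)["input_ids"]
    return tokenizer.decode(token_ids)  # whitespace comes back normalized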
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location='''cpu''')
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1_024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
    # Tie the decoder embedding table into the model-wide shared embeddings.
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
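# Illustrative usage (added; the script filename is hypothetical):
#   python convert_m2m100_checkpoint.py /path/to/model.pt /path/to/dump_dir
# This loads the fairseq checkpoint, maps it onto the config built above, and
# writes a save_pretrained() directory at the second argument.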
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =StableDiffusionXLImgaImgPipeline
snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ ( self: List[str] )-> int:
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase : Any = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
lowerCamelCase : Dict = CLIPTextModel(__a )
lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
lowerCamelCase : Any = image / 2 + 0.5
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Dict = torch.manual_seed(__a )
else:
lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Union[str, Any] = self.get_dummy_components()
lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self: Optional[int] )-> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def a__ ( self: Optional[Any] )-> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ ( self: List[str] )-> Optional[Any]:
pass
def a__ ( self: List[Any] )-> Union[str, Any]:
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["""this is a negative prompt"""]
        inputs["""negative_prompt"""] = negative_prompt
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["""this is a negative prompt"""]
        prompt = 3 * [inputs.pop("""prompt""")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]:
lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_inputs(__a )
lowerCamelCase : Optional[Any] = pipe(**__a ).images
lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
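# Illustrative sketch (added, not part of the tests): minimal img2img usage of
# this pipeline against the public SDXL refiner weights. In a stock diffusers
# install the class is spelled StableDiffusionXLImg2ImgPipeline; the checkpoint
# name, dtype and strength below are assumptions, not fixtures from this file.
def _example_sdxl_img2img(init_image, prompt):
    import torch
    from diffusers import StableDiffusionXLImg2ImgPipeline

    pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
    ).to("cuda")
    # strength controls how far the init image is pushed toward the prompt
    return pipe(prompt=prompt, image=init_image, strength=0.75, num_inference_steps=30).images[0]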
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ : List[str] = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
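# Note (added): assigning a _LazyModule into sys.modules makes the package
# resolve the names declared in _import_structure only on first attribute
# access, so importing the package stays cheap when the torch/tf/vision extras
# are not installed.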
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : List[Any] = "audio-spectrogram-transformer"
def __init__( self , _A=7_6_8 , _A=1_2 , _A=1_2 , _A=3_0_7_2 , _A="gelu" , _A=0.0 , _A=0.0 , _A=0.02 , _A=1E-12 , _A=1_6 , _A=True , _A=1_0 , _A=1_0 , _A=1_0_2_4 , _A=1_2_8 , **_A , ):
'''simple docstring'''
super().__init__(**_A )
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =patch_size
_SCREAMING_SNAKE_CASE =qkv_bias
_SCREAMING_SNAKE_CASE =frequency_stride
_SCREAMING_SNAKE_CASE =time_stride
_SCREAMING_SNAKE_CASE =max_length
_SCREAMING_SNAKE_CASE =num_mel_bins
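# Illustrative sketch (added): the config class above corresponds to ASTConfig
# in a stock transformers install; the class name and its defaults are
# assumptions here, not part of this file.
def _example_ast_config():
    from transformers import ASTConfig

    config = ASTConfig(max_length=512, num_mel_bins=128)
    return config.hidden_size  # 768 by default, matching the __init__ above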
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __a:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=13 ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=99 ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=37 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=None ,) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = parent
UpperCAmelCase_ : List[Any] = 13
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Any = 99
UpperCAmelCase_ : List[Any] = 384
UpperCAmelCase_ : int = 2
UpperCAmelCase_ : Optional[Any] = 4
UpperCAmelCase_ : Optional[Any] = 37
UpperCAmelCase_ : List[str] = '''gelu'''
UpperCAmelCase_ : Tuple = 0.1
UpperCAmelCase_ : Optional[Any] = 0.1
UpperCAmelCase_ : Tuple = 512
UpperCAmelCase_ : Union[str, Any] = 16
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : int = 0.02
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : Union[str, Any] = 4
UpperCAmelCase_ : Union[str, Any] = 128
UpperCAmelCase_ : Tuple = 2
UpperCAmelCase_ : List[str] = 9
UpperCAmelCase_ : Optional[int] = 1
UpperCAmelCase_ : Optional[int] = None
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : int = None
if self.use_input_mask:
UpperCAmelCase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : List[str] = ConvBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,return_dict=_SCREAMING_SNAKE_CASE ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[str]:
UpperCAmelCase_ : str = TFConvBertModel(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase_ : List[str] = [input_ids, input_mask]
UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : str = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : Optional[int] = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : Dict = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : List[str] = self.num_choices
UpperCAmelCase_ : Optional[int] = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase_ : List[Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase_ : Tuple = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase_ : Optional[Any] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : int = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : int = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def a__ ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class __a( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def a__ ( self ) -> List[Any]:
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=ConvBertConfig ,hidden_size=37 )
def a__ ( self ) -> int:
self.config_tester.run_common_tests()
def a__ ( self ) -> str:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def a__ ( self ) -> Tuple:
UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : int = True
UpperCAmelCase_ : Union[str, Any] = True
if hasattr(_SCREAMING_SNAKE_CASE ,'''use_cache''' ):
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Any = getattr(self.model_tester ,'''encoder_seq_length''' ,self.model_tester.seq_length )
UpperCAmelCase_ : str = getattr(self.model_tester ,'''key_length''' ,_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[int] = self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = len(model(_SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE ,saved_model=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE ,'''saved_model''' ,'''1''' )
UpperCAmelCase_ : Optional[int] = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = model(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
UpperCAmelCase_ : Tuple = outputs['''encoder_hidden_states''']
UpperCAmelCase_ : Optional[int] = outputs['''encoder_attentions''']
else:
UpperCAmelCase_ : int = outputs['''hidden_states''']
UpperCAmelCase_ : Optional[int] = outputs['''attentions''']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = getattr(
self.model_tester ,'''expected_num_hidden_layers''' ,self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) ,[self.model_tester.seq_length, self.model_tester.hidden_size] ,)
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] ,)
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
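# Quick usage sketch for the integration check above (assumes the
# YituTech/conv-bert-base weights can be downloaded):
# model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
# model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0].shape  # -> (1, 6, 768)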
| 300
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__a = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __a( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ['''pixel_values''']
def __init__( self ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = 1 / 255 ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = True ,**_SCREAMING_SNAKE_CASE ,) -> None:
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase_ : List[Any] = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase_ : List[str] = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' )
UpperCAmelCase_ : List[Any] = do_resize
UpperCAmelCase_ : List[Any] = size
UpperCAmelCase_ : Optional[Any] = resample
UpperCAmelCase_ : List[str] = do_center_crop
UpperCAmelCase_ : str = crop_size
UpperCAmelCase_ : List[str] = do_rescale
UpperCAmelCase_ : Any = rescale_factor
UpperCAmelCase_ : Tuple = do_normalize
UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ : List[str] = do_convert_rgb
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
UpperCAmelCase_ : List[Any] = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCAmelCase_ : Tuple = get_resize_output_image_size(_SCREAMING_SNAKE_CASE ,size=size['''shortest_edge'''] ,default_to_square=_SCREAMING_SNAKE_CASE )
return resize(_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
UpperCAmelCase_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_SCREAMING_SNAKE_CASE ,size=(size['''height'''], size['''width''']) ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> Any:
return rescale(_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
return normalize(_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,**_SCREAMING_SNAKE_CASE ,) -> PIL.Image.Image:
UpperCAmelCase_ : List[Any] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : Optional[int] = size if size is not None else self.size
UpperCAmelCase_ : Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''size''' ,default_to_square=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = resample if resample is not None else self.resample
UpperCAmelCase_ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' ,default_to_square=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : int = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : List[str] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ : Dict = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ : List[Any] = [convert_to_rgb(_SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ : Any = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
UpperCAmelCase_ : List[str] = [self.resize(image=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
UpperCAmelCase_ : str = [self.center_crop(image=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
UpperCAmelCase_ : int = [self.rescale(image=_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
UpperCAmelCase_ : str = [self.normalize(image=_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE ) for image in images]
UpperCAmelCase_ : List[str] = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for image in images]
UpperCAmelCase_ : int = {'''pixel_values''': images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE ,tensor_type=_SCREAMING_SNAKE_CASE )
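# Usage sketch (illustrative; assumes Pillow is installed and "cat.png" exists;
# the class name is whatever the image processor above is bound to):
# processor = __a(size={"shortest_edge": 224})
# batch = processor(images=PIL.Image.open("cat.png"), return_tensors="np")
# batch["pixel_values"].shape  # -> (1, 3, 224, 224)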
| 300
| 1
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
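# Worked illustration (not part of the metric): a minimal character-level edit
# distance that applies CER = (S + D + I) / N by hand, assuming a non-empty
# reference. The metric above delegates the real computation to jiwer; this
# sketch only makes the formula in the description concrete.
def _toy_cer(reference: str, prediction: str) -> float:
    # Classic dynamic-programming Levenshtein distance over characters.
    rows, cols = len(reference) + 1, len(prediction) + 1
    d = [[0] * cols for _ in range(rows)]
    for i in range(rows):
        d[i][0] = i  # i deletions turn the first i reference chars into ""
    for j in range(cols):
        d[0][j] = j  # j insertions turn "" into the first j prediction chars
    for i in range(1, rows):
        for j in range(1, cols):
            cost = 0 if reference[i - 1] == prediction[j - 1] else 1
            d[i][j] = min(
                d[i - 1][j] + 1,  # deletion
                d[i][j - 1] + 1,  # insertion
                d[i - 1][j - 1] + cost,  # substitution (or match)
            )
    return d[rows - 1][cols - 1] / len(reference)

# _toy_cer("abcd", "abxd") -> 0.25  (one substitution over four reference characters)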
| 60
|
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 60
| 1
|
'''simple docstring'''
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # the highest occurrence count in the input list
    # Gets values of modes: every value whose count equals the maximum
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})
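# Minimal usage sketch: 2 and 3 each appear twice, so both are modes.
# >>> mode([1, 2, 2, 3, 3])
# [2, 3]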
if __name__ == "__main__":
import doctest
doctest.testmod()
| 211
|
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
_UpperCamelCase = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = '''maskformer'''
    attribute_map = {'''hidden_size''': '''mask_feature_size'''}
    backbones_supported = ['''resnet''', '''swin''']
    decoders_supported = ['''detr''']

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["""stage1""", """stage2""", """stage3""", """stage4"""],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("""model_type""")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
                f'''Supported model types: {",".join(self.backbones_supported)}'''
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("""model_type""") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'''Transformer Decoder {decoder_type} not supported, please use one of'''
                    f''' {",".join(self.decoders_supported)}'''
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
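# Usage sketch (illustrative): build a config from explicit backbone/decoder
# configs and round-trip it through to_dict().
# config = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())
# assert config.to_dict()["model_type"] == "maskformer"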
| 211
| 1
|
def fibonacci(n: int) -> int:
    """Return the n-th number of the Fibonacci sequence generated below."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least `n` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    """Project Euler problem 25 style entry point: first index with `n` digits."""
    return fibonacci_digits_index(n)
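# Sanity sketch: 144 is the first Fibonacci number with three digits and sits
# at index 12 of the sequence as generated above, so:
# >>> fibonacci_digits_index(3)
# 12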
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 55
|
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
UpperCAmelCase__ : Any = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
UpperCAmelCase__ : int = parser.parse_args()
UpperCAmelCase__ : Tuple = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
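# Example invocation (a sketch; the script and file names are placeholders):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type ddim \
#       --extract_ema \
#       --half \
#       --dump_path ./converted-pipeline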
| 410
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A =logging.get_logger(__name__)
A ={
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = """dinat"""

    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["""stem"""] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
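# Sanity sketch (illustrative): with the defaults above (embed_dim=64 and four
# stages), hidden_size = 64 * 2 ** 3 = 512, the channel width after the last stage.
# config = DinatConfig()
# assert config.hidden_size == 512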
| 710
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
A =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class _a ( Pipeline ):
def __init__( self : Optional[int] , *lowercase : Any , **lowercase : Optional[int] ):
'''simple docstring'''
super().__init__(*lowercase , **lowercase )
self.check_model_type(lowercase )
def A ( self : List[str] , lowercase : str=None , lowercase : List[str]=None , lowercase : List[str]=None , **lowercase : List[str] ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = {}, {}
if padding is not None:
UpperCAmelCase = padding
if truncation is not None:
UpperCAmelCase = truncation
if top_k is not None:
UpperCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[Any] , lowercase : Union["Image.Image", str] , lowercase : str = None , **lowercase : Optional[int] ):
'''simple docstring'''
if isinstance(lowercase , (Image.Image, str) ) and isinstance(lowercase , lowercase ):
UpperCAmelCase = {'''image''': image, '''question''': question}
else:
UpperCAmelCase = image
UpperCAmelCase = super().__call__(lowercase , **lowercase )
return results
def A ( self : List[Any] , lowercase : List[str] , lowercase : Any=False , lowercase : Any=False ):
'''simple docstring'''
UpperCAmelCase = load_image(inputs['''image'''] )
UpperCAmelCase = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=lowercase , truncation=lowercase )
UpperCAmelCase = self.image_processor(images=lowercase , return_tensors=self.framework )
model_inputs.update(lowercase )
return model_inputs
def A ( self : Dict , lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = self.model(**lowercase )
return model_outputs
def A ( self : Union[str, Any] , lowercase : int , lowercase : Optional[Any]=5 ):
'''simple docstring'''
if top_k > self.model.config.num_labels:
UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase = model_outputs.logits.sigmoid()[0]
UpperCAmelCase , UpperCAmelCase = probs.topk(lowercase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase = scores.tolist()
UpperCAmelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase , lowercase )]
| 358
| 0
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    '\u0009',  # (horizontal tab, '\t')
                    '\u000B',  # (vertical tab)
                    '\u000C',  # (form feed)
                    '\u0020',  # (space, ' ')
                    '\u200E',  # (left-to-right mark):w
                    '\u200F',  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    '\u000A',  # (line feed, '\n')
                    '\r\n',  # (carriage return and line feed, '\r\n')
                    '\u000D',  # (carriage return, '\r')
                    '\r',  # (carriage return, '\r')
                    '\u000D',  # (carriage return, '\r')
                    '\u2028',  # (line separator)
                    '\u2029',  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`

                text = F'{text_of_1_token} {text_of_1_token}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = F' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')

        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.'))

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 669
|
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def _snake_case ( self: str ):
__lowerCamelCase : Tuple = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def _snake_case ( self: Tuple ):
__lowerCamelCase : List[str] = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def _snake_case ( self: int , a: str=False ):
if class_cond:
__lowerCamelCase : str = self.dummy_cond_unet
else:
__lowerCamelCase : str = self.dummy_uncond_unet
# Default to CM multistep sampler
__lowerCamelCase : Tuple = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _snake_case ( self: int , a: List[str] , a: Any=0 ):
if str(a ).startswith('mps' ):
__lowerCamelCase : List[Any] = torch.manual_seed(a )
else:
__lowerCamelCase : Tuple = torch.Generator(device=a ).manual_seed(a )
__lowerCamelCase : Optional[Any] = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components()
__lowerCamelCase : str = ConsistencyModelPipeline(**a )
__lowerCamelCase : str = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Any = self.get_dummy_inputs(a )
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components(class_cond=a )
__lowerCamelCase : Optional[int] = ConsistencyModelPipeline(**a )
__lowerCamelCase : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a )
__lowerCamelCase : Tuple = 0
__lowerCamelCase : List[str] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : Dict = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Optional[int] = self.get_dummy_components()
__lowerCamelCase : Tuple = ConsistencyModelPipeline(**a )
__lowerCamelCase : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Tuple = self.get_dummy_inputs(a )
__lowerCamelCase : str = 1
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Any = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : int = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: List[str] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=a )
__lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**a )
__lowerCamelCase : List[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : List[str] = self.get_dummy_inputs(a )
__lowerCamelCase : List[str] = 1
__lowerCamelCase : List[str] = None
__lowerCamelCase : str = 0
__lowerCamelCase : Tuple = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : int = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self: Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self: Optional[int] , a: str=0 , a: Tuple=False , a: Tuple="cpu" , a: List[str]=torch.floataa , a: Optional[Any]=(1, 3, 64, 64) ):
__lowerCamelCase : Optional[Any] = torch.manual_seed(a )
__lowerCamelCase : Optional[int] = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
__lowerCamelCase : Dict = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
__lowerCamelCase : Optional[Any] = latents
return inputs
def _snake_case ( self: Any , a: Any=0 , a: List[str]="cpu" , a: Optional[Any]=torch.floataa , a: int=(1, 3, 64, 64) ):
if type(a ) == str:
__lowerCamelCase : Dict = torch.device(a )
__lowerCamelCase : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
__lowerCamelCase : str = randn_tensor(a , generator=a , device=a , dtype=a )
return latents
def _snake_case ( self: str ):
__lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : int = self.get_inputs()
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : List[str] = self.get_inputs()
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : Dict = None
__lowerCamelCase : Union[str, Any] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Tuple = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def _snake_case ( self: List[str] ):
__lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : int = self.get_inputs(get_fixed_latents=a , device=a )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
__lowerCamelCase : int = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def _snake_case ( self: Dict ):
__lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : str = self.get_inputs(get_fixed_latents=a , device=a )
__lowerCamelCase : str = 1
__lowerCamelCase : Union[str, Any] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : str = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 669
| 1
|
def solution(n: int = 1_000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 664
|
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    # Set every reference to None and flush the relevant accelerator cache.
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
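# Usage sketch: the decorated function must take the batch size as its first
# argument; on an out-of-memory failure the wrapper halves the batch size and
# retries until the call succeeds or the batch size reaches zero.
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size):
#     ...  # retried with 64, 32, ... after OOM errors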
| 664
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
def __init__( self : Optional[int] , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = PILImageResampling.BICUBIC , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : bool = True , snake_case_ : Union[int, float] = 1 / 255 , snake_case_ : bool = True , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : bool = True , **snake_case_ : List[Any] , ):
"""simple docstring"""
super().__init__(**snake_case_ )
A : List[str] = size if size is not None else {'''shortest_edge''': 224}
A : Optional[int] = get_size_dict(snake_case_ , default_to_square=snake_case_ )
A : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
A : List[str] = get_size_dict(snake_case_ , default_to_square=snake_case_ , param_name='''crop_size''' )
A : str = do_resize
A : Tuple = size
A : Tuple = resample
A : int = do_center_crop
A : List[str] = crop_size
A : List[str] = do_rescale
A : Optional[int] = rescale_factor
A : Tuple = do_normalize
A : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A : int = image_std if image_std is not None else OPENAI_CLIP_STD
A : List[Any] = do_convert_rgb
def _UpperCAmelCase ( self : Optional[Any] , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : PILImageResampling = PILImageResampling.BICUBIC , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Union[str, Any] , ):
"""simple docstring"""
A : Optional[int] = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
A : Dict = get_resize_output_image_size(snake_case_ , size=size['''shortest_edge'''] , default_to_square=snake_case_ )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _UpperCAmelCase ( self : Dict , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : List[Any] , ):
"""simple docstring"""
A : Tuple = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(snake_case_ , size=(size['''height'''], size['''width''']) , data_format=snake_case_ , **snake_case_ )
def _UpperCAmelCase ( self : Optional[int] , snake_case_ : np.ndarray , snake_case_ : Union[int, float] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : List[str] , ):
"""simple docstring"""
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _UpperCAmelCase ( self : Union[str, Any] , snake_case_ : np.ndarray , snake_case_ : Union[float, List[float]] , snake_case_ : Union[float, List[float]] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : str , ):
"""simple docstring"""
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _UpperCAmelCase ( self : Any , snake_case_ : ImageInput , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = None , snake_case_ : bool = None , snake_case_ : int = None , snake_case_ : bool = None , snake_case_ : float = None , snake_case_ : bool = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : bool = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **snake_case_ : Any , ):
"""simple docstring"""
A : Optional[Any] = do_resize if do_resize is not None else self.do_resize
A : Optional[int] = size if size is not None else self.size
A : Optional[Any] = get_size_dict(snake_case_ , param_name='''size''' , default_to_square=snake_case_ )
A : Tuple = resample if resample is not None else self.resample
A : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
A : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
A : Optional[int] = get_size_dict(snake_case_ , param_name='''crop_size''' , default_to_square=snake_case_ )
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Any = do_normalize if do_normalize is not None else self.do_normalize
A : List[str] = image_mean if image_mean is not None else self.image_mean
A : str = image_std if image_std is not None else self.image_std
A : Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A : Dict = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A : Optional[int] = [convert_to_rgb(snake_case_ ) for image in images]
# All transformations expect numpy arrays.
A : Dict = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
A : List[Any] = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_center_crop:
A : Dict = [self.center_crop(image=snake_case_ , size=snake_case_ ) for image in images]
if do_rescale:
A : Optional[int] = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
A : Any = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
A : int = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
A : Optional[int] = {'''pixel_values''': images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
| 256
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = "▁"
UpperCamelCase_ = {"vocab_file": "spiece.model"}
UpperCamelCase_ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
UpperCamelCase_ = {
"google/pegasus-xsum": 5_12,
}
UpperCamelCase_ = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( snake_case ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get list where entries are [1] if a token is [eos] or [pad] else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
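# A hedged sanity check for the offset scheme above (left commented because it
# downloads the google/pegasus-xsum SentencePiece model):
#
#     from transformers import PegasusTokenizer
#
#     tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#     ids = tok("Hello world")["input_ids"]
#     assert ids[-1] == tok.eos_token_id == 1  # EOS appended by build_inputs_with_special_tokens
#     assert tok.convert_ids_to_tokens(0) == tok.pad_token  # ids 0/1 are reserved; SP ids shift by `offset`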
| 256
| 1
|
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 702
|
"""simple docstring"""
from __future__ import annotations
def _A ( _a : list[int] , _a : int ):
"""simple docstring"""
A = []
A = []
A = 0
A = sum(_a )
create_state_space_tree(_a , _a , _a , _a , _a , _a )
return result
def _A ( _a : list[int] , _a : int , _a : int , _a : list[int] , _a : list[list[int]] , _a : int , ):
"""simple docstring"""
if sum(_a ) > max_sum or (remaining_nums_sum + sum(_a )) < max_sum:
return
if sum(_a ) == max_sum:
result.append(_a )
return
for index in range(_a , len(_a ) ):
create_state_space_tree(
_a , _a , index + 1 , [*path, nums[index]] , _a , remaining_nums_sum - nums[index] , )
UpperCAmelCase =[3, 34, 4, 12, 5, 2]
UpperCAmelCase =9
UpperCAmelCase =generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
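# With the inputs above, the DFS finds exactly two subsets summing to 9, in
# index order, so the script prints "[3, 4, 2] [4, 5]":
assert generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 9) == [[3, 4, 2], [4, 5]]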
| 255
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
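# Worked example for the two properties above, using the class defaults: the 71
# attention heads evenly split the 4544-dim hidden state into 64-dim heads, and
# `rotary` is simply the complement of `alibi`.
config = FalconConfig()
assert config.head_dim == 4544 // 71 == 64
assert config.rotary is True  # alibi defaults to False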
| 83
|
"""simple docstring"""
snake_case_ : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase_ ( _lowercase : bytes ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
UpperCAmelCase : int = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(_lowercase )
UpperCAmelCase : List[Any] = "".join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
UpperCAmelCase : Any = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
UpperCAmelCase : int = B"=" * ((6 - len(_lowercase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
UpperCAmelCase : List[Any] = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_lowercase ) , 6 ) ).encode()
+ padding
)
def lowercase_ ( _lowercase : str ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ) and not isinstance(_lowercase , _lowercase ):
UpperCAmelCase : Optional[Any] = (
"argument should be a bytes-like object or ASCII string, "
F"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase , _lowercase ):
try:
UpperCAmelCase : List[Any] = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
UpperCAmelCase : List[str] = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCAmelCase : List[str] = encoded_data[:-padding]
UpperCAmelCase : Any = "".join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
UpperCAmelCase : Any = "".join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
UpperCAmelCase : str = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_lowercase ) , 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
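# Round-trip sanity check for the two functions above (pure Python, no
# third-party dependencies; "UHl0aG9u" is the standard Base64 of b"Python"):
assert base64_encode(b"Python") == b"UHl0aG9u"
assert base64_decode("UHl0aG9u") == b"Python"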
| 595
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
__lowerCAmelCase = import_module("""tasks""" )
try:
__lowerCAmelCase = getattr(UpperCamelCase__ , model_args.task_type )
__lowerCAmelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions, label_ids) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
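# Typical invocation of this script (hedged: paths are illustrative, and the
# data dir must hold CoNLL-2003-formatted train/dev/test .txt files):
#
#     python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#         --labels ./conll2003/labels.txt --output_dir ./out --do_train --do_eval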
| 334
|
import argparse
import os
import re
__A : List[Any] = "src/diffusers"
# Pattern that looks at the indentation in a line.
__A : Dict = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
__A : Optional[int] = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__A : Union[str, Any] = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
__A : List[Any] = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__A : List[str] = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Returns the indent in the line."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__="" , UpperCamelCase__=None , UpperCamelCase__=None ) -> List[str]:
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(UpperCamelCase__ ):
index += 1
__lowerCAmelCase = ["""\n""".join(lines[:index] )]
else:
__lowerCAmelCase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowerCAmelCase = [lines[index]]
index += 1
while index < len(UpperCamelCase__ ) and (end_prompt is None or not lines[index].startswith(UpperCamelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(UpperCamelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(UpperCamelCase__ ) )
if index < len(UpperCamelCase__ ) - 1:
__lowerCAmelCase = [lines[index + 1]]
index += 1
else:
__lowerCAmelCase = []
else:
blocks.append("""\n""".join(UpperCamelCase__ ) )
__lowerCAmelCase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(UpperCamelCase__ ) > 0:
blocks.append("""\n""".join(UpperCamelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(UpperCamelCase__ ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wraps a `key` function (mapping an object to a string) to lower-case and strip underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file: str, check_only: bool = True):
    """Sort `_import_structure` imports in `file`; if `check_only=True`, just check whether they are sorted."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
__A : Any = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
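# Quick demonstration of the ordering rule implemented by `sort_objects` above
# (constants first, then classes, then functions, alphabetized ignoring case
# and underscores):
#
#     sort_objects(["load_model", "MODEL_NAMES", "AutoModel", "CONFIG", "build"])
#     # -> ['CONFIG', 'MODEL_NAMES', 'AutoModel', 'build', 'load_model']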
| 334
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
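# Hedged usage sketch: with the lazy structure above, importing the package is
# cheap and the heavy torch/TF/flax modules load only on first attribute access:
#
#     import transformers
#
#     config = transformers.MT5Config()      # pulls in only the configuration module
#     model = transformers.MT5Model(config)  # first touch triggers the torch modeling import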
| 256
|
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
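# A hedged sketch of reading the generated key files back (each file holds a
# single comma-separated "key_size,n,exponent" line, matching the writes above):
#
#     with open("rsa_pubkey.txt") as f:
#         key_size, n, e = map(int, f.read().split(","))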
| 256
| 1
|
def solution(num: int = 1_000_000) -> int:
    """Return the starting number below `num` that produces the longest Collatz
    chain, memoizing chain lengths as they are discovered."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for start in range(2, num):
        counter = 0
        number = start

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if start not in counters:
            counters[start] = counter

        if counter > pre_counter:
            largest_number = start
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase_ )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
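# Typical invocation (hedged: the task name must be one of `processors.keys()`,
# e.g. "swag", and the paths are illustrative):
#
#     python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-cased \
#         --data_dir ./swag --output_dir ./out --do_train --do_eval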
| 156
| 1
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
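# Minimal self-contained check of one converter above (a pure dict lookup, no
# accelerate backends needed):
assert _convert_yes_no_to_bool("Yes") is True
assert _convert_yes_no_to_bool("no") is False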
| 384
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
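# A hedged usage sketch (left commented because it downloads tokenizer files):
#
#     from transformers import RetriBertTokenizerFast
#
#     tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#     ids = tok.build_inputs_with_special_tokens(tok.convert_tokens_to_ids(["hello"]))
#     # -> [cls_id, hello_id, sep_id]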
| 235
| 0
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] ) -> Optional[int]:
_lowercase = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F"""invalid truth value {val!r}""" )
def is_tensor(snake_case__ :Tuple ) -> bool:
    if is_torch_fx_proxy(snake_case__ ):
        return True
    if is_torch_available():
        import torch
        if isinstance(snake_case__ , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(snake_case__ , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer
        if isinstance(snake_case__ , (jnp.ndarray, Tracer) ):
            return True
    return isinstance(snake_case__ , np.ndarray )
def _is_numpy(snake_case__ :str ) -> bool:
    return isinstance(snake_case__ , np.ndarray )
def is_numpy_array(snake_case__ :List[Any] ) -> bool:
    return _is_numpy(snake_case__ )
def _is_torch(snake_case__ :Dict ) -> bool:
    import torch
    return isinstance(snake_case__ , torch.Tensor )
def is_torch_tensor(snake_case__ :List[Any] ) -> bool:
    return False if not is_torch_available() else _is_torch(snake_case__ )
def _is_torch_device(snake_case__ :Tuple ) -> bool:
    import torch
    return isinstance(snake_case__ , torch.device )
def is_torch_device(snake_case__ :Optional[Any] ) -> bool:
    return False if not is_torch_available() else _is_torch_device(snake_case__ )
def _is_torch_dtype(snake_case__ :Union[str, Any] ) -> bool:
    import torch
    # a string such as "float32" is first resolved against the torch namespace
    if isinstance(snake_case__ , str ):
        if hasattr(torch , snake_case__ ):
            snake_case__ = getattr(torch , snake_case__ )
        else:
            return False
    return isinstance(snake_case__ , torch.dtype )
def is_torch_dtype(snake_case__ :str ) -> bool:
    return False if not is_torch_available() else _is_torch_dtype(snake_case__ )
def _is_tensorflow(snake_case__ :Optional[Any] ) -> bool:
    import tensorflow as tf
    return isinstance(snake_case__ , tf.Tensor )
def is_tf_tensor(snake_case__ :int ) -> bool:
    return False if not is_tf_available() else _is_tensorflow(snake_case__ )
def _is_tf_symbolic_tensor(snake_case__ :List[str] ) -> bool:
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , 'is_symbolic_tensor' ):
        return tf.is_symbolic_tensor(snake_case__ )
    return type(snake_case__ ) == tf.Tensor
def is_tf_symbolic_tensor(snake_case__ :Optional[int] ) -> bool:
    return False if not is_tf_available() else _is_tf_symbolic_tensor(snake_case__ )
def _is_jax(snake_case__ :List[str] ) -> bool:
    import jax.numpy as jnp  # noqa: F811
    return isinstance(snake_case__ , jnp.ndarray )
def is_jax_tensor(snake_case__ :Any ) -> bool:
    return False if not is_flax_available() else _is_jax(snake_case__ )
def to_py_obj(obj :Any ) -> Any:
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy(obj :Any ) -> Any:
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
class ModelOutput( OrderedDict ):
    """simple docstring"""
    def __post_init__( self ):
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(F"""{self.__class__.__name__} has no fields.""" )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""" )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self , *args , **kwargs ):
        raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
    def setdefault( self , *args , **kwargs ):
        raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
    def pop( self , *args , **kwargs ):
        raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
    def update( self , *args , **kwargs ):
        raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
    def __getitem__( self , k ):
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , name , value ):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )
    def __setitem__( self , key , value ):
        # Will raise a KeyException if needed
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
    def to_tuple( self ) -> Tuple[Any]:
        return tuple(self[k] for k in self.keys() )
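# Usage sketch (hedged): a dataclass subclassing the corrected ModelOutput above acts
# as both a dict and a tuple; `SampleOutput` is an illustrative name, not from the source.
from dataclasses import dataclass
@dataclass
class SampleOutput( ModelOutput ):
    loss: Any = None
    logits: Any = None
_out = SampleOutput(logits=np.zeros(2 ) )
assert _out["logits"] is _out.logits
assert len(_out.to_tuple() ) == 1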
class ExplicitEnum( str , Enum ):
    """simple docstring"""
    @classmethod
    def _missing_( cls , value ):
        raise ValueError(
            F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""" )
class PaddingStrategy( ExplicitEnum ):
    """simple docstring"""
    LONGEST = '''longest'''
    MAX_LENGTH = '''max_length'''
    DO_NOT_PAD = '''do_not_pad'''
class TensorType( ExplicitEnum ):
    """simple docstring"""
    PYTORCH = '''pt'''
    TENSORFLOW = '''tf'''
    NUMPY = '''np'''
    JAX = '''jax'''
class ContextManagers:
    """simple docstring"""
    def __init__( self , context_managers : List[ContextManager] ):
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self ):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self , *args , **kwargs ):
        self.stack.__exit__(*args , **kwargs )
def can_return_loss(model_class :Any ) -> bool:
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class :Any ) -> List[str]:
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d :MutableMapping , parent_key :str = "" , delimiter :str = "." ) -> dict:
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
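# Usage sketch (hedged): nested keys are joined with the delimiter.
assert flatten_dict({"a": 1, "b": {"c": 2}} ) == {"a": 1, "b.c": 2}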
@contextmanager
def working_or_temp_dir(working_dir , use_temp_dir : bool = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def transpose(array , axes=None ):
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(F"""Type not supported for transpose: {type(array )}.""" )
def reshape(array , newshape ):
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(F"""Type not supported for reshape: {type(array )}.""" )
def squeeze(array , axis=None ):
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(F"""Type not supported for squeeze: {type(array )}.""" )
def expand_dims(array , axis ):
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(F"""Type not supported for expand_dims: {type(array )}.""" )
def tensor_size(array ):
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(F"""Type not supported for tensor_size: {type(array )}.""" )
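# Usage sketch (hedged): the helpers above dispatch on the tensor type, so the same
# call works for numpy (shown), torch, tensorflow and jax inputs.
_x = np.zeros((2, 3) )
assert transpose(_x ).shape == (3, 2)
assert reshape(_x , (3, 2) ).shape == (3, 2)
assert tensor_size(_x ) == 6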
def add_model_info_to_auto_map(auto_map , repo_id ):
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [F"""{repo_id}--{v}""" if (v is not None and '--' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"""{repo_id}--{value}"""
    return auto_map
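# Usage sketch (hedged): entries without a repo prefix get one.
assert add_model_info_to_auto_map({"AutoModel": "modeling.MyModel"} , "user/repo" ) == {
    "AutoModel": "user/repo--modeling.MyModel"
}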
def infer_framework(model_class ):
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('torch' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F"""Could not infer framework from class {model_class}.""" )
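# Usage sketch (hedged): a class whose MRO contains a torch-defined base resolves to "pt".
if is_torch_available():
    import torch
    assert infer_framework(torch.nn.Linear ) == "pt"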
| 535
|
from itertools import count
def solution(min_block_length :int = 50 ) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 100_0000:
            break
    return n
if __name__ == "__main__":
print(F"""{solution() = }""")
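# Context sketch (hedged, Project Euler 115): with a minimum block length of 10 the
# fill-count function first exceeds one million at n = 57, per the problem statement:
# >>> solution(10)
# 57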
| 535
| 1
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 87
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
def _info(self ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def _compute(self , predictions , references , char_order : int = CHRF.CHAR_ORDER , word_order : int = CHRF.WORD_ORDER , beta : int = CHRF.BETA , lowercase : bool = False , whitespace : bool = False , eps_smoothing : bool = False , ):
    """simple docstring"""
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
    output = sb_chrf.corpus_score(predictions , transformed_references )
    return {
        "score": output.score,
        "char_order": output.char_order,
        "word_order": output.word_order,
        "beta": output.beta,
    }
| 15
| 0
|
def solution(n : int = 200_0000 ):
    '''Returns the sum of all the primes below n, via a sieve (Project Euler 10).'''
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''')
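# Usage sketch (hedged, Project Euler 10): primes below 10 sum to 2 + 3 + 5 + 7 = 17.
assert solution(10) == 17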
| 716
|
import os
def solution():
    '''Finds the maximum top-to-bottom path sum in triangle.txt (Project Euler 18/67).'''
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_directory , """triangle.txt""" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(""" """ ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number_1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_1 , number_2 )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
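# Worked sketch (hedged): the same top-down accumulation on a small in-memory
# triangle; the best path 3 -> 7 -> 4 -> 9 sums to 23.
_tri = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for _i in range(1 , len(_tri ) ):
    for _j in range(len(_tri[_i] ) ):
        _right = _tri[_i - 1][_j] if _j != len(_tri[_i - 1] ) else 0
        _left = _tri[_i - 1][_j - 1] if _j > 0 else 0
        _tri[_i][_j] += max(_left , _right )
assert max(_tri[-1] ) == 23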
| 291
| 0
|
'''simple docstring'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence ) -> list:
    '''Sorts a sequence containing only the values in `colors` in a single pass.'''
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg )
    return sequence
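# Usage sketch (hedged): a single-pass three-way partition, O(n) time, O(1) extra space.
assert dutch_national_flag_sort([2, 0, 1, 2, 0] ) == [0, 0, 1, 2, 2]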
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input("Enter numbers separated by commas:\n").strip()
unsorted = [int(item.strip()) for item in user_input.split(",")]
print(f"{dutch_national_flag_sort(unsorted)}")
| 638
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        '''simple docstring'''
        size = size if size is not None else {"""shortest_edge""": 18}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
        '''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = LevitImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
| 225
| 0
|
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 303
|
import sys
from collections import defaultdict
class Heap:
    def __init__( self ):
        self.node_position = []
    def get_position( self , vertex ):
        return self.node_position[vertex]
    def set_position( self , vertex , pos ):
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ):
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ):
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
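# Usage sketch (hedged): Prim's algorithm on a 3-vertex triangle graph keeps the
# two cheapest edges; names below are illustrative.
def _demo_prims():
    demo_adjacency = defaultdict(list )
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        demo_adjacency[u].append([v, w] )
        demo_adjacency[v].append([u, w] )
    assert sorted(prisms_algorithm(demo_adjacency ) ) == [(0, 1), (1, 2)]
_demo_prims()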
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 303
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        '''simple docstring'''
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    @slow
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings( self ):
        '''simple docstring'''
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            'semantic_prompt': np.ones(seq_len ),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname , 'file.npz' )
        np.savez(tmpfilename , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=tmpfilename )
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding='max_length' , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 42
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCAmelCase: int = logging.get_logger(__name__)
class FlavaFeatureExtractor( FlavaImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use FlavaImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 709
|
"""simple docstring"""
def mf_knapsack(i , wt , val , j ):
    # Memoized (top-down) 0/1 knapsack over the global table `f`.
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1 , wt , val , j )
        else:
            val_ = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val_
    return f[i][j]
def knapsack(w , wt , val , n ):
    # Bottom-up 0/1 knapsack; returns the optimal value and the full dp table.
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w , wt , val ):
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            """Both the weights and values vectors must be either lists or tuples""" )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            """The number of weights must be the same as the number of values.\n"""
            F"""But got {num_items} weights and {len(val )} values"""
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                """All weights must be integers but got weight of """
                F"""type {type(wt[i] )} at index {i}"""
            )
            raise TypeError(msg )
    optimal_val, dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution(dp , wt , i , j , optimal_set ):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("""optimal_value = """, optimal_solution)
    print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 600
| 0
|
def gnome_sort(lst):
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
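# Usage sketch (hedged): gnome sort walks back one step after every swap; O(n^2) worst case.
assert gnome_sort([3, 1, 2]) == [1, 2, 3]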
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted))
| 25
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline( ChunkPipeline ):
    '''simple docstring'''
    def __init__( self , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        if self.framework == "tf":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        requires_backends(self , '''vision''' )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
    def __call__( self , image , candidate_labels = None , **kwargs ):
        """simple docstring"""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('''text_queries''' )
        if isinstance(image , (str, Image.Image) ):
            inputs = {'''image''': image, '''candidate_labels''': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def _sanitize_parameters( self , **kwargs ):
        """simple docstring"""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['''threshold'''] = kwargs['''threshold''']
        if "top_k" in kwargs:
            postprocess_params['''top_k'''] = kwargs['''top_k''']
        return {}, {}, postprocess_params
    def preprocess( self , inputs ):
        """simple docstring"""
        image = load_image(inputs['''image'''] )
        candidate_labels = inputs['''candidate_labels''']
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split(''',''' )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward( self , model_inputs ):
        """simple docstring"""
        target_size = model_inputs.pop('''target_size''' )
        candidate_label = model_inputs.pop('''candidate_label''' )
        is_last = model_inputs.pop('''is_last''' )
        outputs = self.model(**model_inputs )
        model_outputs = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.1 , top_k=None ):
        """simple docstring"""
        results = []
        for model_output in model_outputs:
            label = model_output['''candidate_label''']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output['''target_size'''] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs['''scores'''][index].item()
                box = self._get_bounding_box(outputs['''boxes'''][index][0] )
                results.append({'''score''': score, '''label''': label, '''box''': box} )
        results = sorted(results , key=lambda x: x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box( self , box ) -> Dict[str, int]:
        """simple docstring"""
        if self.framework != "pt":
            raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            '''xmin''': xmin,
            '''ymin''': ymin,
            '''xmax''': xmax,
            '''ymax''': ymax,
        }
        return bbox
| 485
| 0
|
a_ :dict[str, float] = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.602176634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.3_5_5_8_1_8,
}
def energy_conversion(from_type: str , to_type: str , value: float ) -> float:
    '''Converts an energy value between the units listed in ENERGY_CONVERSION.'''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {', '.join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
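# Usage sketch (hedged): 1 kWh is 3.6 MJ, so converting through joules gives 3.6.
assert energy_conversion("kilowatthour" , "megajoule" , 1.0 ) == 3.6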
| 703
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longt5'] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_longt5'] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 250
| 0
|
"""simple docstring"""
from __future__ import annotations
def find_max(nums , left , right ):
    '''simple docstring'''
    if len(nums ) == 0:
        raise ValueError("find_max() arg is an empty sequence" )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError("list index out of range" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
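# Usage sketch (hedged): divide-and-conquer maximum over the whole index range.
assert find_max([1, 5, 3, 4] , 0 , 3 ) == 5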
| 636
|
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None
UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"
REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case ):
    '''simple docstring'''
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"' )
        else:
            test_case(self , metric_name )
    return wrapper
def skip_if_metric_requires_transformers(test_case ):
    '''simple docstring'''
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"' )
        else:
            test_case(self , metric_name )
    return wrapper
def skip_on_windows(test_case ):
    '''simple docstring'''
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"' )
        else:
            test_case(self , metric_name )
    return wrapper
def get_local_metric_names():
    '''simple docstring'''
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq , skip_if_metric_requires_transformers , skip_on_windows )
@local
class LocalMetricTest( parameterized.TestCase ):
    '''simple docstring'''
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
    def test_load_metric( self , metric_name ):
        _ = "[...]"  # elided placeholder retained from the source
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics" , metric_name ) ).module_path )
        metric = datasets.load.import_main_class(metric_module.__name__ , dataset=False )
        # check parameters
        parameters = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) )  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @slow
    def test_load_real_metric( self , metric_name ):
        _ = "[...]"  # elided placeholder retained from the source
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics" , metric_name ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @contextmanager
    def patch_intensive_calls( self , metric_name , module_name ):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name ):
                yield
        else:
            yield
    @contextmanager
    def use_local_metrics( self ):
        def load_local_metric(metric_name , *args , **kwargs ):
            return load_metric(os.path.join("metrics" , metric_name ) , *args , **kwargs )
        with patch("datasets.load_metric" ) as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
    @classmethod
    def register_intensive_calls_patcher( cls , metric_name ):
        def wrapper(patcher ):
            patcher = contextmanager(patcher )
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def patch_bleurt(module_name ):
    '''simple docstring'''
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string("sv" , "" , "" )  # handle pytest cli flags
    class MockedPredictor( Predictor ):
        '''simple docstring'''
        def predict( self , input_dict ):
            assert len(input_dict["input_ids"] ) == 2
            return np.array([1.0_3, 1.0_4] )
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def patch_bertscore(module_name ):
    '''simple docstring'''
    import torch
    def bert_cos_score_idf(model , refs , *args , **kwargs ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model" ), patch(
        "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def patch_comet(module_name ):
    '''simple docstring'''
    def load_from_checkpoint(model_path ):
        class Model:
            '''simple docstring'''
            def predict( self , data , *args , **kwargs ):
                assert len(data ) == 2
                scores = [0.1_9, 0.9_2]
                return scores, sum(scores ) / len(scores )
        return Model()
    # mock download_model which is supposed to download a comet model
    # mock load_from_checkpoint which is supposed to load that model
    with patch("comet.download_model" ) as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def _lowerCamelCase ( ):
'''simple docstring'''
    metric = load_metric(os.path.join("metrics" , "seqeval" ) )
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
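# Hedged aside -- a standalone sketch (hypothetical patch target, not part of
# the suite above) of the unittest.mock pattern the patchers rely on: swap an
# expensive callable for a stub, then yield control back to the caller.
import json
from contextlib import contextmanager
from unittest.mock import patch

@contextmanager
def _fake_json_dumps():
    with patch("json.dumps" ) as mock_dumps:
        mock_dumps.side_effect = lambda obj: "{}" # stand-in for a heavy call
        yield

with _fake_json_dumps():
    assert json.dumps({"a": 1} ) == "{}"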
| 636
| 1
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf( model, ckpt_dir, model_name ):
    """simple docstring"""
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    var_map = (
        ('''layer.''', '''layer_'''),
        ('''word_embeddings.weight''', '''word_embeddings'''),
        ('''position_embeddings.weight''', '''position_embeddings'''),
        ('''token_type_embeddings.weight''', '''token_type_embeddings'''),
        ('''.''', '''/'''),
        ('''LayerNorm/weight''', '''LayerNorm/gamma'''),
        ('''LayerNorm/bias''', '''LayerNorm/beta'''),
        ('''weight''', '''kernel'''),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt, repl )
        return f'''bert/{name}'''
    def create_tf_var(tensor, name, session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session )
            tf.keras.backend.set_value(tf_var, torch_tensor )
            tf_weight = session.run(tf_var )
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor )}''' )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('''-''', '''_''' ) + '''.ckpt''' ) )
def main( raw_args=None ):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''', type=str, required=True, help='''model name e.g. bert-base-uncased''' )
    parser.add_argument(
        '''--cache_dir''', type=str, default=None, required=False, help='''Directory containing pytorch model''' )
    parser.add_argument('''--pytorch_model_path''', type=str, required=True, help='''/path/to/<pytorch-model-name>.bin''' )
    parser.add_argument('''--tf_cache_dir''', type=str, required=True, help='''Directory in which to save tensorflow model''' )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path ), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name )
if __name__ == "__main__":
main()
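# Hedged aside: a minimal standalone demo (example parameter name assumed) of
# how the var_map table above rewrites a PyTorch parameter name into the TF
# variable scope used by the checkpoint writer:
_demo_name = '''encoder.layer.0.attention.self.query.weight'''
for _patt, _repl in (('''layer.''', '''layer_'''), ('''.''', '''/'''), ('''weight''', '''kernel''')):
    _demo_name = _demo_name.replace(_patt, _repl )
assert _demo_name == '''encoder/layer_0/attention/self/query/kernel'''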
| 180
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __lowercase ( snake_case ):
"""simple docstring"""
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
def __lowercase ( snake_case ):
"""simple docstring"""
for char in word:
__magic_name__ :Union[str, Any] = ord(snake_case )
if not _is_chinese_char(snake_case ):
return 0
return 1
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :Any = set()
for token in tokens:
__magic_name__ :Dict = len(snake_case ) > 1 and is_chinese(snake_case )
if chinese_word:
word_set.add(snake_case )
__magic_name__ :str = list(snake_case )
return word_list
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
__magic_name__ :int = max([len(snake_case ) for w in chinese_word_set] )
__magic_name__ :Any = bert_tokens
__magic_name__ , __magic_name__ :List[Any] = 0, len(snake_case )
while start < end:
__magic_name__ :str = True
if is_chinese(bert_word[start] ):
__magic_name__ :Optional[Any] = min(end - start, snake_case )
for i in range(snake_case, 1, -1 ):
__magic_name__ :Any = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
__magic_name__ :Optional[int] = '''##''' + bert_word[j]
__magic_name__ :List[str] = start + i
__magic_name__ :Optional[Any] = False
break
if single_word:
start += 1
return bert_word
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[int] = []
for i in range(0, len(snake_case ), 1_0_0 ):
__magic_name__ :List[str] = ltp_tokenizer.seg(lines[i : i + 1_0_0] )[0]
__magic_name__ :List[str] = [get_chinese_word(snake_case ) for r in res]
ltp_res.extend(snake_case )
assert len(snake_case ) == len(snake_case )
__magic_name__ :Union[str, Any] = []
for i in range(0, len(snake_case ), 1_0_0 ):
__magic_name__ :str = bert_tokenizer(lines[i : i + 1_0_0], add_special_tokens=snake_case, truncation=snake_case, max_length=5_1_2 )
bert_res.extend(res['''input_ids'''] )
assert len(snake_case ) == len(snake_case )
__magic_name__ :Union[str, Any] = []
for input_ids, chinese_word in zip(snake_case, snake_case ):
__magic_name__ :Any = []
for id in input_ids:
__magic_name__ :List[Any] = bert_tokenizer._convert_id_to_token(snake_case )
input_tokens.append(snake_case )
__magic_name__ :Dict = add_sub_symbol(snake_case, snake_case )
__magic_name__ :Optional[Any] = []
        # We only save the positions of Chinese subwords that start with ##, i.e. those that are part of a whole word.
for i, token in enumerate(snake_case ):
if token[:2] == "##":
__magic_name__ :Optional[int] = token[2:]
# save chinese tokens' pos
if len(snake_case ) == 1 and _is_chinese_char(ord(snake_case ) ):
ref_id.append(snake_case )
ref_ids.append(snake_case )
assert len(snake_case ) == len(snake_case )
return ref_ids
def __lowercase ( snake_case ):
"""simple docstring"""
with open(args.file_name, '''r''', encoding='''utf-8''' ) as f:
__magic_name__ :Union[str, Any] = f.readlines()
__magic_name__ :int = [line.strip() for line in data if len(snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__magic_name__ :List[Any] = LTP(args.ltp ) # faster in GPU device
__magic_name__ :List[str] = BertTokenizer.from_pretrained(args.bert )
__magic_name__ :Optional[int] = prepare_ref(snake_case, snake_case, snake_case )
with open(args.save_path, '''w''', encoding='''utf-8''' ) as f:
__magic_name__ :Dict = [json.dumps(snake_case ) + '''\n''' for ref in ref_ids]
f.writelines(snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
main(args)
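# Hedged aside: a worked example of the codepoint range test above -- U+4E2D
# ("中") falls inside the CJK Unified Ideographs block, so it counts as a
# Chinese character, while an ASCII letter does not:
assert 0x4_e_0_0 <= ord("""中""" ) <= 0x9_f_f_f
assert not (0x4_e_0_0 <= ord("""a""" ) <= 0x9_f_f_f)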
| 180
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def lowercase__ ( lowerCamelCase : List[Any] ) -> Union[str, Any]:
lowerCAmelCase__ : List[str] = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase__ : Optional[Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowerCAmelCase__ : Optional[Any] = 4
lowerCAmelCase__ : List[Any] = 4_8
lowerCAmelCase__ : Union[str, Any] = "pixelshuffle_aux"
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase__ : int = [6, 6, 6, 6]
lowerCAmelCase__ : List[Any] = 6_0
lowerCAmelCase__ : List[str] = [6, 6, 6, 6]
lowerCAmelCase__ : Union[str, Any] = "pixelshuffledirect"
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase__ : Any = 4
lowerCAmelCase__ : Union[str, Any] = "nearest+conv"
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : List[str] = 1
lowerCAmelCase__ : Dict = 1_2_6
lowerCAmelCase__ : Dict = 7
lowerCAmelCase__ : Optional[Any] = 2_55.0
lowerCAmelCase__ : Optional[int] = ""
return config
def lowercase__ ( lowerCamelCase : str , lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
if "patch_embed.proj" in name and "layers" not in name:
lowerCAmelCase__ : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowerCAmelCase__ : Dict = name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" )
if "layers" in name:
lowerCAmelCase__ : Dict = name.replace("layers" , "encoder.stages" )
if "residual_group.blocks" in name:
lowerCAmelCase__ : Any = name.replace("residual_group.blocks" , "layers" )
if "attn.proj" in name:
lowerCAmelCase__ : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCAmelCase__ : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCAmelCase__ : Dict = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCAmelCase__ : Dict = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCAmelCase__ : List[Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCAmelCase__ : Tuple = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
lowerCAmelCase__ : List[Any] = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
lowerCAmelCase__ : List[str] = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
lowerCAmelCase__ : Optional[Any] = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
lowerCAmelCase__ : Optional[int] = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if "patch_embed.proj" in name:
lowerCAmelCase__ : int = name.replace("patch_embed.proj" , "patch_embed.projection" )
if name == "norm.weight":
lowerCAmelCase__ : int = "layernorm.weight"
if name == "norm.bias":
lowerCAmelCase__ : List[str] = "layernorm.bias"
if "conv_first" in name:
lowerCAmelCase__ : str = name.replace("conv_first" , "first_convolution" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowerCAmelCase__ : int = name.replace("conv_last" , "final_convolution" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowerCAmelCase__ : Tuple = name.replace("conv_before_upsample.0" , "conv_before_upsample" )
if "upsample.0" in name:
lowerCAmelCase__ : int = name.replace("upsample.0" , "upsample.convolution_0" )
if "upsample.2" in name:
lowerCAmelCase__ : str = name.replace("upsample.2" , "upsample.convolution_1" )
lowerCAmelCase__ : Union[str, Any] = "upsample." + name
elif config.upsampler == "pixelshuffledirect":
lowerCAmelCase__ : Dict = name.replace("upsample.0.weight" , "upsample.conv.weight" )
lowerCAmelCase__ : List[Any] = name.replace("upsample.0.bias" , "upsample.conv.bias" )
else:
pass
else:
lowerCAmelCase__ : str = "swin2sr." + name
return name
def lowercase__ ( lowerCamelCase : List[str] , lowerCamelCase : Optional[Any] ) -> int:
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ : Union[str, Any] = orig_state_dict.pop(lowerCamelCase )
if "qkv" in key:
lowerCAmelCase__ : Optional[int] = key.split("." )
lowerCAmelCase__ : List[Any] = int(key_split[1] )
lowerCAmelCase__ : int = int(key_split[4] )
lowerCAmelCase__ : Union[str, Any] = config.embed_dim
if "weight" in key:
lowerCAmelCase__ : List[Any] = val[:dim, :]
lowerCAmelCase__ : str = val[dim : dim * 2, :]
lowerCAmelCase__ : Optional[Any] = val[-dim:, :]
else:
lowerCAmelCase__ : List[Any] = val[:dim]
lowerCAmelCase__ : Union[str, Any] = val[dim : dim * 2]
lowerCAmelCase__ : Union[str, Any] = val[-dim:]
pass
else:
lowerCAmelCase__ : List[str] = val
return orig_state_dict
def lowercase__ ( lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Any ) -> List[Any]:
lowerCAmelCase__ : Tuple = get_config(lowerCamelCase )
lowerCAmelCase__ : Optional[int] = SwinaSRForImageSuperResolution(lowerCamelCase )
model.eval()
lowerCAmelCase__ : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="cpu" )
lowerCAmelCase__ : Optional[int] = convert_state_dict(lowerCamelCase , lowerCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
if len(lowerCamelCase ) > 0:
raise ValueError("Missing keys when converting: {}".format(lowerCamelCase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F"Unexpected key {key} in state_dict" )
# verify values
lowerCAmelCase__ : str = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
lowerCAmelCase__ : int = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ).convert("RGB" )
lowerCAmelCase__ : Any = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowerCAmelCase__ : Optional[int] = 1_2_6 if "Jpeg" in checkpoint_url else 2_5_6
lowerCAmelCase__ : Optional[Any] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
lowerCAmelCase__ : List[str] = transforms(lowerCamelCase ).unsqueeze(0 )
if config.num_channels == 1:
lowerCAmelCase__ : Optional[int] = pixel_values[:, 0, :, :].unsqueeze(1 )
lowerCAmelCase__ : Dict = model(lowerCamelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowerCAmelCase__ : Dict = torch.Size([1, 3, 5_1_2, 5_1_2] )
lowerCAmelCase__ : Union[str, Any] = torch.tensor(
[[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase__ : Tuple = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
lowerCAmelCase__ : Any = torch.tensor(
[[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowerCAmelCase__ : List[Any] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
lowerCAmelCase__ : Any = torch.tensor(
[[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase__ : str = torch.Size([1, 3, 5_1_2, 5_1_2] )
lowerCAmelCase__ : Tuple = torch.tensor(
[[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase__ : List[Any] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
lowerCAmelCase__ : List[Any] = torch.tensor(
[[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowerCamelCase , atol=1e-3 )
print("Looks ok!" )
lowerCAmelCase__ : List[Any] = {
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
lowerCAmelCase__ : List[str] = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(lowerCamelCase )
if push_to_hub:
model.push_to_hub(F"caidas/{model_name}" )
processor.push_to_hub(F"caidas/{model_name}" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
__UpperCAmelCase = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
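# Hedged aside: a toy standalone illustration (random tensor, hypothetical
# dimension) of the fused-qkv split performed in convert_state_dict above --
# rows [:dim] are the query, [dim : dim * 2] the key, and [-dim:] the value:
_dim = 4
_qkv = torch.randn(3 * _dim , _dim )
_q, _k, _v = _qkv[:_dim, :], _qkv[_dim : _dim * 2, :], _qkv[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)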
| 308
|
"""simple docstring"""
from typing import Any
def lowercase__ ( lowerCamelCase : list , lowerCamelCase : list , lowerCamelCase : dict , lowerCamelCase : dict , lowerCamelCase : dict , ) -> list:
_validation(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
# Creates data structures and fill initial step
lowerCAmelCase__ : dict = {}
lowerCAmelCase__ : dict = {}
for state in states_space:
lowerCAmelCase__ : Optional[int] = observations_space[0]
lowerCAmelCase__ : Dict = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCAmelCase__ : Tuple = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(lowerCamelCase ) ):
lowerCAmelCase__ : Dict = observations_space[o]
lowerCAmelCase__ : Dict = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCAmelCase__ : int = ""
lowerCAmelCase__ : Union[str, Any] = -1
for k_state in states_space:
lowerCAmelCase__ : Optional[Any] = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCAmelCase__ : int = probability
lowerCAmelCase__ : List[Any] = k_state
# Update probabilities and pointers dicts
lowerCAmelCase__ : Tuple = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCAmelCase__ : List[str] = arg_max
# The final observation
lowerCAmelCase__ : Optional[int] = observations_space[len(lowerCamelCase ) - 1]
# argmax for given final observation
lowerCAmelCase__ : Dict = ""
lowerCAmelCase__ : Dict = -1
for k_state in states_space:
lowerCAmelCase__ : int = probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCAmelCase__ : Any = probability
lowerCAmelCase__ : Any = k_state
lowerCAmelCase__ : Optional[int] = arg_max
# Process pointers backwards
lowerCAmelCase__ : Tuple = last_state
lowerCAmelCase__ : Dict = []
for o in range(len(lowerCamelCase ) - 1 , -1 , -1 ):
result.append(lowerCamelCase )
lowerCAmelCase__ : Optional[int] = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase__ ( lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : Any , ) -> None:
_validate_not_empty(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
_validate_lists(lowerCamelCase , lowerCamelCase )
_validate_dicts(
lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowercase__ ( lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : Any , ) -> None:
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowercase__ ( lowerCamelCase : Any , lowerCamelCase : Any ) -> None:
_validate_list(lowerCamelCase , "observations_space" )
_validate_list(lowerCamelCase , "states_space" )
def lowercase__ ( lowerCamelCase : Any , lowerCamelCase : str ) -> None:
if not isinstance(_object , lowerCamelCase ):
lowerCAmelCase__ : Optional[int] = F"{var_name} must be a list"
raise ValueError(lowerCamelCase )
else:
for x in _object:
if not isinstance(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase__ : Tuple = F"{var_name} must be a list of strings"
raise ValueError(lowerCamelCase )
def lowercase__ ( lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : Any , ) -> None:
_validate_dict(lowerCamelCase , "initial_probabilities" , lowerCamelCase )
_validate_nested_dict(lowerCamelCase , "transition_probabilities" )
_validate_nested_dict(lowerCamelCase , "emission_probabilities" )
def lowercase__ ( lowerCamelCase : Any , lowerCamelCase : str ) -> None:
_validate_dict(_object , lowerCamelCase , lowerCamelCase )
for x in _object.values():
_validate_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowercase__ ( lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : type , lowerCamelCase : bool = False ) -> None:
if not isinstance(_object , lowerCamelCase ):
lowerCAmelCase__ : Tuple = F"{var_name} must be a dict"
raise ValueError(lowerCamelCase )
if not all(isinstance(lowerCamelCase , lowerCamelCase ) for x in _object ):
lowerCAmelCase__ : str = F"{var_name} all keys must be strings"
raise ValueError(lowerCamelCase )
if not all(isinstance(lowerCamelCase , lowerCamelCase ) for x in _object.values() ):
lowerCAmelCase__ : str = "nested dictionary " if nested else ""
lowerCAmelCase__ : int = F"{var_name} {nested_text}all values must be {value_type.__name__}"
raise ValueError(lowerCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
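# Hedged aside: a toy input (the classic health/observation HMM, illustrative
# probabilities) in the exact shape the Viterbi routine above expects; for
# this input the most likely hidden path is ["Healthy", "Healthy", "Fever"].
_observations = ["normal", "cold", "dizzy"]
_states = ["Healthy", "Fever"]
_initial_p = {"Healthy": 0.6, "Fever": 0.4}
_transition_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
_emission_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}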
| 308
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( _UpperCamelCase ):
UpperCamelCase : int = ["image_processor", "tokenizer"]
UpperCamelCase : List[str] = "FlavaImageProcessor"
UpperCamelCase : List[Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , __magic_name__=None , __magic_name__=None , **__magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __magic_name__ , )
_lowerCAmelCase = kwargs.pop('feature_extractor' )
_lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__magic_name__ , __magic_name__ )
_lowerCAmelCase = self.image_processor
def __call__( self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = True , __magic_name__ = False , __magic_name__ = False , __magic_name__ = None , __magic_name__ = 0 , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = True , __magic_name__ = None , **__magic_name__ , ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_token_type_ids=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
if images is not None:
_lowerCAmelCase = self.image_processor(
__magic_name__ , return_image_mask=__magic_name__ , return_codebook_pixels=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
if text is not None and images is not None:
encoding.update(__magic_name__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def _lowerCamelCase ( self , *__magic_name__ , **__magic_name__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def _lowerCamelCase ( self , *__magic_name__ , **__magic_name__ ):
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.tokenizer.model_input_names
_lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __magic_name__ , )
return self.image_processor_class
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __magic_name__ , )
return self.image_processor
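# Hedged usage sketch (checkpoint name assumed, image loading elided) of the
# joint text/image call implemented by the processor above:
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# inputs = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")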
| 309
|
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : str = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class __magic_name__ ( _UpperCamelCase ):
UpperCamelCase : Optional[int] = "mvp"
UpperCamelCase : Optional[Any] = ["past_key_values"]
UpperCamelCase : Any = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , __magic_name__=5_0_2_6_7 , __magic_name__=1_0_2_4 , __magic_name__=1_2 , __magic_name__=4_0_9_6 , __magic_name__=1_6 , __magic_name__=1_2 , __magic_name__=4_0_9_6 , __magic_name__=1_6 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__="gelu" , __magic_name__=1_0_2_4 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=0.0 , __magic_name__=False , __magic_name__=True , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__=True , __magic_name__=2 , __magic_name__=2 , __magic_name__=False , __magic_name__=1_0_0 , __magic_name__=8_0_0 , **__magic_name__ , ):
"""simple docstring"""
_lowerCAmelCase = vocab_size
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = d_model
_lowerCAmelCase = encoder_ffn_dim
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = encoder_attention_heads
_lowerCAmelCase = decoder_ffn_dim
_lowerCAmelCase = decoder_layers
_lowerCAmelCase = decoder_attention_heads
_lowerCAmelCase = dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = activation_function
_lowerCAmelCase = init_std
_lowerCAmelCase = encoder_layerdrop
_lowerCAmelCase = decoder_layerdrop
_lowerCAmelCase = classifier_dropout
_lowerCAmelCase = use_cache
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase = use_prompt
_lowerCAmelCase = prompt_length
_lowerCAmelCase = prompt_mid_dim
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , decoder_start_token_id=__magic_name__ , forced_eos_token_id=__magic_name__ , **__magic_name__ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , __magic_name__ ):
_lowerCAmelCase = self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'The config can simply be saved and uploaded again to be fixed.' )
| 309
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : int=7 , lowerCamelCase__ : int=3 , lowerCamelCase__ : int=18 , lowerCamelCase__ : List[Any]=30 , lowerCamelCase__ : Tuple=400 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : int=None , lowerCamelCase__ : str=True , ):
a__ : Any = size if size is not None else {"""height""": 18, """width""": 18}
a__ : List[str] = parent
a__ : Any = batch_size
a__ : Tuple = num_channels
a__ : Tuple = image_size
a__ : List[str] = min_resolution
a__ : Optional[Any] = max_resolution
a__ : Tuple = do_resize
a__ : int = size
a__ : Optional[int] = apply_ocr
def _UpperCamelCase( self : int ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A__ ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowercase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _UpperCamelCase( self : Optional[int] ):
a__ : Tuple = LayoutLMvaImageProcessingTester(self )
@property
def _UpperCamelCase( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase( self : Tuple ):
a__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , "do_resize" ) )
self.assertTrue(hasattr(snake_case_ , "size" ) )
self.assertTrue(hasattr(snake_case_ , "apply_ocr" ) )
def _UpperCamelCase( self : Dict ):
a__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
a__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def _UpperCamelCase( self : str ):
pass
def _UpperCamelCase( self : int ):
a__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
a__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , snake_case_ )
self.assertIsInstance(encoding.boxes , snake_case_ )
# Test batched
a__ : List[Any] = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase( self : Union[str, Any] ):
a__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
a__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a__ : Dict = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase( self : int ):
a__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
a__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a__ : Tuple = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase( self : Dict ):
a__ : List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
a__ : Tuple = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
a__ : str = Image.open(ds[0]["file"] ).convert("RGB" )
a__ : List[Any] = image_processing(snake_case_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a__ : Optional[Any] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
a__ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , snake_case_ )
self.assertListEqual(encoding.boxes , snake_case_ )
# with apply_OCR = False
a__ : str = LayoutLMvaImageProcessor(apply_ocr=snake_case_ )
a__ : Tuple = image_processing(snake_case_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 37
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ = 'RegNetConfig'
# Base docstring
SCREAMING_SNAKE_CASE_ = 'facebook/regnet-y-040'
SCREAMING_SNAKE_CASE_ = [1, 10_88, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ = 'facebook/regnet-y-040'
SCREAMING_SNAKE_CASE_ = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ = 3 , snake_case_ = 1 , snake_case_ = 1 , snake_case_ = "relu" , ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: Optional[int] = nn.Convad(
snake_case_ , snake_case_ , kernel_size=snake_case_ , stride=snake_case_ , padding=kernel_size // 2 , groups=snake_case_ , bias=snake_case_ , )
__UpperCAmelCase: Any = nn.BatchNormad(snake_case_ )
__UpperCAmelCase: Optional[Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Tuple = self.convolution(snake_case_ )
__UpperCAmelCase: str = self.normalization(snake_case_ )
__UpperCAmelCase: Optional[Any] = self.activation(snake_case_ )
return hidden_state
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: List[str] = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
__UpperCAmelCase: Tuple = config.num_channels
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
__UpperCAmelCase: int = self.embedder(snake_case_ )
return hidden_state
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ = 2 ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: Any = nn.Convad(snake_case_ , snake_case_ , kernel_size=1 , stride=snake_case_ , bias=snake_case_ )
__UpperCAmelCase: Union[str, Any] = nn.BatchNormad(snake_case_ )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: List[str] = self.convolution(snake_case_ )
__UpperCAmelCase: Optional[int] = self.normalization(snake_case_ )
return hidden_state
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) )
__UpperCAmelCase: Union[str, Any] = nn.Sequential(
nn.Convad(snake_case_ , snake_case_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(snake_case_ , snake_case_ , kernel_size=1 ) , nn.Sigmoid() , )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = self.pooler(snake_case_ )
__UpperCAmelCase: List[Any] = self.attention(snake_case_ )
__UpperCAmelCase: int = hidden_state * attention
return hidden_state
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = 1 ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: Union[str, Any] = in_channels != out_channels or stride != 1
__UpperCAmelCase: Optional[Any] = max(1 , out_channels // config.groups_width )
__UpperCAmelCase: List[str] = (
RegNetShortCut(snake_case_ , snake_case_ , stride=snake_case_ ) if should_apply_shortcut else nn.Identity()
)
__UpperCAmelCase: Optional[int] = nn.Sequential(
RegNetConvLayer(snake_case_ , snake_case_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(snake_case_ , snake_case_ , stride=snake_case_ , groups=snake_case_ , activation=config.hidden_act ) , RegNetConvLayer(snake_case_ , snake_case_ , kernel_size=1 , activation=snake_case_ ) , )
__UpperCAmelCase: Optional[Any] = ACTaFN[config.hidden_act]
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: str = hidden_state
__UpperCAmelCase: List[str] = self.layer(snake_case_ )
__UpperCAmelCase: Any = self.shortcut(snake_case_ )
hidden_state += residual
__UpperCAmelCase: List[str] = self.activation(snake_case_ )
return hidden_state
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = 1 ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: Dict = in_channels != out_channels or stride != 1
__UpperCAmelCase: List[str] = max(1 , out_channels // config.groups_width )
__UpperCAmelCase: int = (
RegNetShortCut(snake_case_ , snake_case_ , stride=snake_case_ ) if should_apply_shortcut else nn.Identity()
)
__UpperCAmelCase: Tuple = nn.Sequential(
RegNetConvLayer(snake_case_ , snake_case_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(snake_case_ , snake_case_ , stride=snake_case_ , groups=snake_case_ , activation=config.hidden_act ) , RegNetSELayer(snake_case_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(snake_case_ , snake_case_ , kernel_size=1 , activation=snake_case_ ) , )
__UpperCAmelCase: Tuple = ACTaFN[config.hidden_act]
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: int = hidden_state
__UpperCAmelCase: List[Any] = self.layer(snake_case_ )
__UpperCAmelCase: int = self.shortcut(snake_case_ )
hidden_state += residual
__UpperCAmelCase: Optional[Any] = self.activation(snake_case_ )
return hidden_state
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = 2 , snake_case_ = 2 , ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: Tuple = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
__UpperCAmelCase: Optional[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
snake_case_ , snake_case_ , snake_case_ , stride=snake_case_ , ) , *[layer(snake_case_ , snake_case_ , snake_case_ ) for _ in range(depth - 1 )] , )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: Any = self.layers(snake_case_ )
return hidden_state
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase: List[str] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
snake_case_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__UpperCAmelCase: int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(snake_case_ , config.depths[1:] ):
self.stages.append(RegNetStage(snake_case_ , snake_case_ , snake_case_ , depth=snake_case_ ) )
def lowercase_ ( self , snake_case_ , snake_case_ = False , snake_case_ = True ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCAmelCase: Any = hidden_states + (hidden_state,)
__UpperCAmelCase: Dict = stage_module(snake_case_ )
if output_hidden_states:
__UpperCAmelCase: Any = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=snake_case_ , hidden_states=snake_case_ )
class a ( __lowerCAmelCase ):
"""simple docstring"""
__lowerCAmelCase = RegNetConfig
__lowerCAmelCase = """regnet"""
__lowerCAmelCase = """pixel_values"""
__lowerCAmelCase = True
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
if isinstance(snake_case_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(snake_case_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowercase_ ( self , snake_case_ , snake_case_=False ):
'''simple docstring'''
if isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase: Dict = value
SCREAMING_SNAKE_CASE_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , __lowerCAmelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case_ ):
'''simple docstring'''
super().__init__(snake_case_ )
__UpperCAmelCase: Dict = config
__UpperCAmelCase: List[str] = RegNetEmbeddings(snake_case_ )
__UpperCAmelCase: List[str] = RegNetEncoder(snake_case_ )
__UpperCAmelCase: Any = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(snake_case_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase_ ( self , snake_case_ , snake_case_ = None , snake_case_ = None ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase: str = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase: Tuple = self.embedder(snake_case_ )
__UpperCAmelCase: List[str] = self.encoder(
snake_case_ , output_hidden_states=snake_case_ , return_dict=snake_case_ )
__UpperCAmelCase: List[Any] = encoder_outputs[0]
__UpperCAmelCase: Dict = self.pooler(snake_case_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=snake_case_ , pooler_output=snake_case_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , __lowerCAmelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case_ ):
'''simple docstring'''
super().__init__(snake_case_ )
__UpperCAmelCase: int = config.num_labels
__UpperCAmelCase: List[str] = RegNetModel(snake_case_ )
# classification head
__UpperCAmelCase: Union[str, Any] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(snake_case_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase_ ( self , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase: List[str] = self.regnet(snake_case_ , output_hidden_states=snake_case_ , return_dict=snake_case_ )
__UpperCAmelCase: Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
__UpperCAmelCase: List[Any] = self.classifier(snake_case_ )
__UpperCAmelCase: str = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__UpperCAmelCase: int = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__UpperCAmelCase: Optional[Any] = """single_label_classification"""
else:
__UpperCAmelCase: Union[str, Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
__UpperCAmelCase: Any = MSELoss()
if self.num_labels == 1:
__UpperCAmelCase: Any = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__UpperCAmelCase: Tuple = loss_fct(snake_case_ , snake_case_ )
elif self.config.problem_type == "single_label_classification":
__UpperCAmelCase: Dict = CrossEntropyLoss()
__UpperCAmelCase: int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__UpperCAmelCase: List[str] = BCEWithLogitsLoss()
__UpperCAmelCase: Any = loss_fct(snake_case_ , snake_case_ )
if not return_dict:
__UpperCAmelCase: List[Any] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=snake_case_ , logits=snake_case_ , hidden_states=outputs.hidden_states )
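# Hedged aside: a standalone toy (random tensors, hypothetical shapes) of the
# problem_type dispatch above -- single-label classification flattens logits
# and labels before handing them to CrossEntropyLoss:
_logits = torch.randn(2 , 3 )
_labels = torch.tensor([0, 2] )
_loss = CrossEntropyLoss()(_logits.view(-1 , 3 ) , _labels.view(-1 ) )
assert _loss.ndim == 0 # a scalar loss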
| 523
| 0
|
"""simple docstring"""
import requests
def send_slack_message( message_body, slack_url ):
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url , json={'''text''': message_body} , headers=headers )
    if response.status_code != 2_00:
        error_message = (
            '''Request to slack returned an error '''
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
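# Hedged aside (placeholder URL, request not actually sent here): the helper
# above reduces to a single JSON POST against the incoming-webhook endpoint:
# requests.post("https://hooks.slack.com/services/T000/B000/XXXX",
#               json={"text": "build finished"},
#               headers={"Content-Type": "application/json"})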
| 701
|
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
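# For orientation only: a minimal sketch of the record shape the parser above
# expects in biencoder-nq-dev.json. The field names are taken from the code;
# the values are purely illustrative.
example_record = {
    "question": "who wrote the declaration of independence",
    "positive_ctxs": [{"title": "Thomas Jefferson", "text": "..."}],
}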
| 523
| 0
|
'''simple docstring'''
def mean_absolute_deviation(nums: list) -> float:
    """
    Return the mean absolute deviation of a list of numbers.

    >>> mean_absolute_deviation([1, 2, 3, 4])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90
|
"""simple docstring"""
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
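# Quick round-trip sketch (added for illustration; the byte string is arbitrary):
encoded = base16_encode(b"Hello")  # '48656C6C6F'
assert base16_decode(encoded) == b"Hello"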
| 434
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                value = line_number
                key = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
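# Example invocation (a sketch; the checkpoint and output paths below are
# placeholders, not files shipped with this script):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned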
| 410
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all the examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
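# Typical usage sketch (assuming this file lives at utils/release.py, as in the
# transformers repo; that path is an assumption here):
#
#   python utils/release.py                 # prepare a minor release
#   python utils/release.py --patch         # prepare a patch release
#   python utils/release.py --post_release  # move back to a dev version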
| 410
| 1
|
'''simple docstring'''
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Find the shortest path between `start` and `goal` nodes."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance between `start` and `target` nodes."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
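# Side note (not in the original): list.pop(0) is O(n); for larger graphs a
# collections.deque gives O(1) pops from the left. A minimal sketch of the
# same traversal with a deque:
from collections import deque


def bfs_shortest_path_deque(graph: dict, start, goal) -> list:
    explored = {start}
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node == goal:
            return path
        for neighbour in graph[node]:
            if neighbour not in explored:
                explored.add(neighbour)
                queue.append(path + [neighbour])
    return []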
| 466
|
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)
        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
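# Example invocation (a sketch; the script name and output path are
# placeholders, not guaranteed by the original sources):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --output_file_path ./bart-beam-search.onnx \
#       --device cpu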
| 466
| 1
|
"""simple docstring"""
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
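# Illustration only (not part of diffusers): every try/except block above uses
# the same pattern -- probe for an optional dependency, and fall back to dummy
# objects that raise a helpful error when used. "somelib" is a made-up name
# standing in for any optional backend.
try:
    import somelib  # noqa: F401

    _somelib_available = True
except ImportError:
    _somelib_available = False

if _somelib_available:

    def run():
        return "real implementation backed by somelib"

else:

    def run():
        raise ImportError("This feature requires `pip install somelib`.")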
| 711
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Helps in renaming the embedding layer weights for stage `idx`."""
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    """Helps in renaming the attention block weights for stage `idx`, block `cnt`."""
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    """Helps in renaming the cls_token weights."""
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def final():
    """Helps in renaming the final classification layer weights."""
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """
    Convert a Microsoft CvT checkpoint to a Hugging Face checkpoint.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Path to the original CvT checkpoint (.pth) to convert",
    )
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
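# Example invocation (a sketch; the .pth path must point at a checkpoint you
# downloaded from the zoo link above):
#
#   python convert_cvt_checkpoint.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24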
| 491
| 0
|
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 362
|
from string import ascii_uppercase
LETTER_TO_IDX = {char: i for i, char in enumerate(ascii_uppercase)}
IDX_TO_LETTER = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Extend the key so it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message with the extended key."""
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (LETTER_TO_IDX[letter] - LETTER_TO_IDX[key_new[i]]) % 26
            i += 1
            cipher += IDX_TO_LETTER[x]
    return cipher


def original_text(cipher_txt: str, key_new: str) -> str:
    """Decrypt the cipher text back to the original message."""
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (LETTER_TO_IDX[letter] + LETTER_TO_IDX[key_new[i]] + 26) % 26
            i += 1
            or_txt += IDX_TO_LETTER[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
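# Round-trip sanity check for the cipher above (an illustrative addition; the
# message and key are arbitrary):
msg = "HELLO WORLD"
k = generate_key(msg, "KEY")
assert original_text(cipher_text(msg, k), k) == msg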
| 398
| 0
|
"""simple docstring"""
def join(separator: str, separated: list) -> str:
    """
    Joins a list of strings using a separator.

    >>> join("", ["a", "b", "c", "d"])
    'abcd'
    >>> join("#", ["a", "b", "c", "d"])
    'a#b#c#d'
    >>> join(" ", ["You", "are", "amazing!"])
    'You are amazing!'
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 721
|
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 215
| 0
|
'''simple docstring'''
import argparse
import os
import re
_lowerCamelCase : int = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def sort_auto_mapping(fname, overwrite=False):
    """Sort all auto mappings in one file."""
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()
    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    """Sort all auto mappings in the library."""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to"
            " fix this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 430
|
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """
    Improved pseudo numerical methods for diffusion models (iPNDM) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
| 1
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
UpperCAmelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> Tuple:
"""simple docstring"""
for attribute in key.split('''.''' ):
snake_case_ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
snake_case_ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
else:
snake_case_ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> Any:
"""simple docstring"""
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
snake_case_ = True
else:
for key, mapped_key in MAPPING.items():
snake_case_ = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
snake_case_ = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
snake_case_ = '''weight_g'''
elif "weight_v" in name:
snake_case_ = '''weight_v'''
elif "bias" in name:
snake_case_ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ = '''weight'''
else:
snake_case_ = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> Tuple:
"""simple docstring"""
snake_case_ = full_name.split('''conv_layers.''' )[-1]
snake_case_ = name.split('''.''' )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case_ = value
logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True )-> Dict:
"""simple docstring"""
if config_path is not None:
snake_case_ = UniSpeechSatConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
snake_case_ = UniSpeechSatConfig()
snake_case_ = ''''''
if is_finetuned:
snake_case_ = UniSpeechSatForCTC(SCREAMING_SNAKE_CASE )
else:
snake_case_ = UniSpeechSatForPreTraining(SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
snake_case_ = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
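# Hedged usage sketch (editor's addition): a typical invocation of the
# conversion script above. The script filename is an assumption; the flags
# come directly from the argparse definition just above.
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict_dir \
#       --pytorch_dump_folder_path ./unispeech-sat-hf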
| 700
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
UpperCAmelCase = get_logger()
UpperCAmelCase = None
class lowerCAmelCase_ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
super().__init__(features=_UpperCAmelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(
F'''Expected {device} to be a `str` not {type(_UpperCAmelCase )}, as `jaxlib.xla_extension.Device` '''
'''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
'''the device with `str()` to get its string identifier that will be internally mapped '''
'''to the actual `jaxlib.xla_extension.Device`.''' )
snake_case_ = device if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'''Device with string identifier {self.device} not listed among the available '''
F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
F'''device: {str(jax.devices()[0] )}.''' )
snake_case_ = str(jax.devices()[0] )
snake_case_ = jnp_array_kwargs
@staticmethod
def UpperCamelCase__ ( ):
import jax
return {str(_UpperCAmelCase ): device for device in jax.devices()}
def UpperCamelCase__ ( self , _UpperCAmelCase ):
import jax
import jax.numpy as jnp
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and column:
if all(
isinstance(_UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_UpperCAmelCase , axis=0 )
return column
def UpperCamelCase__ ( self , _UpperCAmelCase ):
import jax
import jax.numpy as jnp
if isinstance(_UpperCAmelCase , (str, bytes, type(_UpperCAmelCase )) ):
return value
elif isinstance(_UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
snake_case_ = {}
if isinstance(_UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
snake_case_ = {'''dtype''': jnp.int64}
else:
snake_case_ = {'''dtype''': jnp.int32}
elif isinstance(_UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
snake_case_ = {'''dtype''': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_UpperCAmelCase , PIL.Image.Image ):
snake_case_ = np.asarray(_UpperCAmelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_UpperCAmelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_UpperCAmelCase , '''__array__''' ) and not isinstance(_UpperCAmelCase , jax.Array ):
snake_case_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_UpperCAmelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
return map_nested(self._recursive_tensorize , _UpperCAmelCase , map_list=_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = self.numpy_arrow_extractor().extract_row(_UpperCAmelCase )
snake_case_ = self.python_features_decoder.decode_row(_UpperCAmelCase )
return self.recursive_tensorize(_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = self.numpy_arrow_extractor().extract_column(_UpperCAmelCase )
snake_case_ = self.python_features_decoder.decode_column(_UpperCAmelCase , pa_table.column_names[0] )
snake_case_ = self.recursive_tensorize(_UpperCAmelCase )
snake_case_ = self._consolidate(_UpperCAmelCase )
return column
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = self.numpy_arrow_extractor().extract_batch(_UpperCAmelCase )
snake_case_ = self.python_features_decoder.decode_batch(_UpperCAmelCase )
snake_case_ = self.recursive_tensorize(_UpperCAmelCase )
for column_name in batch:
snake_case_ = self._consolidate(batch[column_name] )
return batch
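# Hedged usage sketch (editor's addition): formatters like the one above are
# normally reached through the `datasets` formatting API rather than built
# directly; the dataset contents here are illustrative assumptions.
#
# import datasets
# ds = datasets.Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
# ds = ds.with_format("jax")   # rows/columns now come back as jax.Array
# row = ds[0]                  # {"x": Array([1., 2.], dtype=float32)}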
| 531
| 0
|
import qiskit
def _lowerCamelCase ( __lowerCamelCase = 2 ) -> qiskit.result.counts.Counts:
'''simple docstring'''
UpperCAmelCase__ : str = qubits
# Using Aer's simulator
UpperCAmelCase__ : Tuple = qiskit.Aer.get_backend("""aer_simulator""" )
# Creating a Quantum Circuit acting on the q register
UpperCAmelCase__ : Tuple = qiskit.QuantumCircuit(__lowerCamelCase , __lowerCamelCase )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , __lowerCamelCase ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , i )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(__lowerCamelCase ) ) , list(range(__lowerCamelCase ) ) )
# Measuring any one qubit now collapses the superposition of the others,
# leaving every qubit in the same state as the measured one.
# Executing the circuit on the simulator
UpperCAmelCase__ : Optional[int] = qiskit.execute(__lowerCamelCase , __lowerCamelCase , shots=1000 )
return job.result().get_counts(__lowerCamelCase )
if __name__ == "__main__":
print(f'''Total count for various states are: {quantum_entanglement(3)}''')
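# Hedged note (editor's addition): for the GHZ-style circuit built above, the
# measured counts concentrate on the all-zero and all-one bitstrings. For
# qubits=3 and shots=1000 a typical result looks like {'000': 510, '111': 490} -
# the two outcomes split roughly 50/50 and mixed bitstrings do not appear
# (up to simulator noise).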
| 79
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __snake_case ( UpperCamelCase_ ):
_a = ['''image_processor''', '''tokenizer''']
_a = '''ViltImageProcessor'''
_a = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Optional[Any] , A_ : str=None , A_ : Any=None , **A_ : Dict):
lowerCAmelCase_ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , A_ , )
lowerCAmelCase_ : Optional[int] = kwargs.pop('''feature_extractor''')
lowerCAmelCase_ : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(A_ , A_)
lowerCAmelCase_ : Any = self.image_processor
def __call__( self : str , A_ : str , A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , A_ : bool = True , A_ : Union[bool, str, PaddingStrategy] = False , A_ : Union[bool, str, TruncationStrategy] = None , A_ : Optional[int] = None , A_ : int = 0 , A_ : Optional[int] = None , A_ : Optional[bool] = None , A_ : Optional[bool] = None , A_ : bool = False , A_ : bool = False , A_ : bool = False , A_ : bool = False , A_ : bool = True , A_ : Optional[Union[str, TensorType]] = None , **A_ : Optional[Any] , ):
lowerCAmelCase_ : List[str] = self.tokenizer(
text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_token_type_ids=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
# add pixel_values + pixel_mask
lowerCAmelCase_ : List[Any] = self.image_processor(A_ , return_tensors=A_)
encoding.update(A_)
return encoding
def UpperCAmelCase__ ( self : Tuple , *A_ : Union[str, Any] , **A_ : Any):
return self.tokenizer.batch_decode(*A_ , **A_)
def UpperCAmelCase__ ( self : Optional[Any] , *A_ : List[str] , **A_ : Union[str, Any]):
return self.tokenizer.decode(*A_ , **A_)
@property
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : List[str] = self.tokenizer.model_input_names
lowerCAmelCase_ : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def UpperCAmelCase__ ( self : Union[str, Any]):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , A_ , )
return self.image_processor_class
@property
def UpperCAmelCase__ ( self : Dict):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , A_ , )
return self.image_processor
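# Hedged usage sketch (editor's addition): the call pattern for a ViLT-style
# processor such as the class above; the checkpoint name is an assumption.
#
# from PIL import Image
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# inputs = processor(images=Image.open("photo.png"),
#                    text="How many cats are there?", return_tensors="pt")
# # -> dict with input_ids/attention_mask from the tokenizer plus
# #    pixel_values (+ pixel_mask) from the image processor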
| 171
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : str = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class __snake_case ( lowerCamelCase_ ):
lowerCAmelCase_ = "timesformer"
def __init__( self : Any , _lowercase : Tuple=224 , _lowercase : Tuple=16 , _lowercase : List[Any]=3 , _lowercase : str=8 , _lowercase : Optional[Any]=768 , _lowercase : List[str]=12 , _lowercase : List[str]=12 , _lowercase : Dict=3072 , _lowercase : List[str]="gelu" , _lowercase : List[str]=0.0 , _lowercase : List[str]=0.0 , _lowercase : Tuple=0.02 , _lowercase : List[Any]=1E-6 , _lowercase : List[str]=True , _lowercase : Dict="divided_space_time" , _lowercase : Union[str, Any]=0 , **_lowercase : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = num_frames
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = qkv_bias
SCREAMING_SNAKE_CASE__ = attention_type
SCREAMING_SNAKE_CASE__ = drop_path_rate
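# Hedged usage sketch (editor's addition): instantiating the config above with
# a couple of overrides and building a randomly initialized model from it.
#
# from transformers import TimesformerConfig, TimesformerModel
# config = TimesformerConfig(num_frames=8, attention_type="divided_space_time")
# model = TimesformerModel(config)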
| 710
|
import os
__lowerCamelCase : Union[str, Any] = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
while index < len(__UpperCamelCase ) - 1:
SCREAMING_SNAKE_CASE__ = SYMBOLS[numerals[index]]
SCREAMING_SNAKE_CASE__ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """"""
SCREAMING_SNAKE_CASE__ = num // 1000
numerals += m_count * "M"
num %= 1000
SCREAMING_SNAKE_CASE__ = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
SCREAMING_SNAKE_CASE__ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
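# Hedged worked example (editor's addition), tracing the intended generator
# logic above for num = 1994: the thousands give "M", then c_count == 9 gives
# "CM", x_count == 9 gives "XC", and the final num == 4 gives "IV",
# producing "MCMXCIV".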
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str = "/p089_roman.txt" ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 0
with open(os.path.dirname(__UpperCamelCase ) + roman_numerals_filename ) as filea:
SCREAMING_SNAKE_CASE__ = filea.readlines()
for line in lines:
SCREAMING_SNAKE_CASE__ = line.strip()
SCREAMING_SNAKE_CASE__ = parse_roman_numerals(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = generate_roman_numerals(__UpperCamelCase )
savings += len(__UpperCamelCase ) - len(__UpperCamelCase )
return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
| 379
| 0
|
"""simple docstring"""
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str:
if not (isinstance(lowerCAmelCase , lowerCAmelCase ) and isinstance(lowerCAmelCase , lowerCAmelCase )):
raise ValueError("""longest_common_substring() takes two strings for inputs""" )
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : List[str] = len(lowerCAmelCase )
UpperCAmelCase__ : int = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
UpperCAmelCase__ : Dict = 0
UpperCAmelCase__ : Tuple = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
UpperCAmelCase__ : Optional[Any] = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
UpperCAmelCase__ : Tuple = i
UpperCAmelCase__ : Optional[Any] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
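# Hedged worked example (editor's addition) of the intended DP above: for
# inputs "abcdef" and "xbcdy" the table grows along the matching b-c-d
# diagonal, so ans_length reaches 3 with ans_index 4, and the returned
# slice is "bcd".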
if __name__ == "__main__":
import doctest
doctest.testmod()
| 182
|
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_A = """▁"""
_A = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = BigBirdTokenizer
SCREAMING_SNAKE_CASE = BigBirdTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _a (self ):
"""simple docstring"""
super().setUp()
UpperCAmelCase__ : Any = self.tokenizer_class(_lowerCamelCase , keep_accents=_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = """<s>"""
UpperCAmelCase__ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """[MASK]""" )
self.assertEqual(len(_lowerCamelCase ) , 1004 )
def _a (self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _a (self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ : List[Any] = self.get_tokenizer()
UpperCAmelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase__ : Dict = """I was born in 92000, and this is falsé."""
UpperCAmelCase__ : Dict = tokenizer.tokenize(_lowerCamelCase )
UpperCAmelCase__ : int = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : int = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
UpperCAmelCase__ : str = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Any = self.get_rust_tokenizer()
UpperCAmelCase__ : List[Any] = tokenizer.encode(_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = BigBirdTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [285, 46, 10, 170, 382] , )
UpperCAmelCase__ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase__ : Tuple = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase__ : Tuple = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _a (self ):
"""simple docstring"""
return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = """Hello World!"""
UpperCAmelCase__ : Union[str, Any] = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) )
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
UpperCAmelCase__ : Dict = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) )
@require_torch
@slow
def _a (self ):
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase__ : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase__ : str = """ """.join(_lowerCamelCase )
UpperCAmelCase__ : int = self.big_tokenizer.encode_plus(_lowerCamelCase , return_tensors="""pt""" , return_token_type_ids=_lowerCamelCase )
UpperCAmelCase__ : str = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = BigBirdConfig(attention_type="""original_full""" )
UpperCAmelCase__ : Optional[int] = BigBirdModel(_lowerCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowerCamelCase )
model(**_lowerCamelCase )
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
UpperCAmelCase__ : int = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids )
self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" )
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = {"""input_ids""": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
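# Hedged usage note (editor's addition): a test module like the one above is
# normally run with pytest, e.g. (the path is an assumption):
#   pytest tests/models/big_bird/test_tokenization_big_bird.py -q
# where the @slow-marked cases additionally require RUN_SLOW=1.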
| 182
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : str = DanceDiffusionPipeline
__UpperCamelCase : Optional[Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__UpperCamelCase : int = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__UpperCamelCase : Optional[int] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : Optional[Any] = False
def lowerCAmelCase__ ( self : List[Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = UNet1DModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=snake_case_ , use_timestep_embedding=snake_case_ , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
UpperCamelCase_: Optional[int] = IPNDMScheduler()
UpperCamelCase_: Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] , snake_case_ : int=0 ):
if str(snake_case_ ).startswith("""mps""" ):
UpperCamelCase_: List[Any] = torch.manual_seed(snake_case_ )
else:
UpperCamelCase_: List[Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
UpperCamelCase_: Tuple = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_: Any = self.get_dummy_components()
UpperCamelCase_: Optional[Any] = DanceDiffusionPipeline(**snake_case_ )
UpperCamelCase_: str = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Union[str, Any] = self.get_dummy_inputs(snake_case_ )
UpperCamelCase_: Union[str, Any] = pipe(**snake_case_ )
UpperCamelCase_: Optional[int] = output.audios
UpperCamelCase_: str = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
UpperCamelCase_: Tuple = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCAmelCase__ ( self : Any ):
return super().test_save_load_local()
@skip_mps
def lowerCAmelCase__ ( self : Any ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def lowerCAmelCase__ ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def lowerCAmelCase__ ( self : str ):
return super().test_attention_slicing_forward_pass()
def lowerCAmelCase__ ( self : Optional[int] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Optional[int] = torch_device
UpperCamelCase_: Tuple = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
UpperCamelCase_: List[Any] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[Any] = torch.manual_seed(0 )
UpperCamelCase_: int = pipe(generator=snake_case_ , num_inference_steps=100 , audio_length_in_s=4.096 )
UpperCamelCase_: Optional[int] = output.audios
UpperCamelCase_: Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase_: List[Any] = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Tuple = torch_device
UpperCamelCase_: Union[str, Any] = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.float16 )
UpperCamelCase_: List[Any] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Optional[int] = torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = pipe(generator=snake_case_ , num_inference_steps=100 , audio_length_in_s=4.096 )
UpperCamelCase_: Optional[Any] = output.audios
UpperCamelCase_: Union[str, Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase_: List[Any] = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 670
|
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
hf_model.apply_weight_norm()
UpperCamelCase_: Union[str, Any] = checkpoint["""input_conv.weight_g"""]
UpperCamelCase_: Optional[int] = checkpoint["""input_conv.weight_v"""]
UpperCamelCase_: List[Any] = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCamelCase_: Dict = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCamelCase_: Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCamelCase_: int = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCamelCase_: int = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase_: Tuple = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase_: List[str] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]:
if config_path is not None:
UpperCamelCase_: Union[str, Any] = SpeechT5HifiGanConfig.from_pretrained(lowerCamelCase )
else:
UpperCamelCase_: str = SpeechT5HifiGanConfig()
UpperCamelCase_: Union[str, Any] = SpeechT5HifiGan(lowerCamelCase )
UpperCamelCase_: str = torch.load(lowerCamelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Union[str, Any] = np.load(lowerCamelCase )
UpperCamelCase_: int = stats[0].reshape(-1 )
UpperCamelCase_: Union[str, Any] = stats[1].reshape(-1 )
UpperCamelCase_: Dict = torch.from_numpy(lowerCamelCase ).float()
UpperCamelCase_: Optional[Any] = torch.from_numpy(lowerCamelCase ).float()
model.save_pretrained(lowerCamelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
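# Hedged usage sketch (editor's addition): a typical invocation of the
# HiFi-GAN converter above. The script filename is an assumption; the flags
# come directly from the argparse definition just above.
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5-hifigan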
| 670
| 1
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
UpperCamelCase__ = 'src/transformers'
# Matches is_xxx_available()
UpperCamelCase__ = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
UpperCamelCase__ = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCamelCase__ = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
UpperCamelCase__ = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
UpperCamelCase__ = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCamelCase__ = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCamelCase__ = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCamelCase__ = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
UpperCamelCase__ = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
UpperCamelCase__ = re.compile(r'^\s*try:')
# Catches a line with else:
UpperCamelCase__ = re.compile(r'^\s*else:')
def lowerCamelCase ( _snake_case ):
if _re_test_backend.search(_snake_case ) is None:
return None
UpperCAmelCase__ : Union[str, Any] = [b[0] for b in _re_backend.findall(_snake_case )]
backends.sort()
return "_and_".join(_snake_case )
def lowerCamelCase ( _snake_case ):
with open(_snake_case ,'r' ,encoding='utf-8' ,newline='\n' ) as f:
UpperCAmelCase__ : List[Any] = f.readlines()
UpperCAmelCase__ : Union[str, Any] = 0
while line_index < len(_snake_case ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_snake_case ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCAmelCase__ : str = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
UpperCAmelCase__ : Tuple = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_snake_case ):
UpperCAmelCase__ : Optional[int] = _re_one_line_import_struct.search(_snake_case ).groups()[0]
UpperCAmelCase__ : str = re.findall(r'\[([^\]]+)\]' ,_snake_case )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
UpperCAmelCase__ : List[str] = _re_import_struct_key_value.search(_snake_case )
if single_line_import_search is not None:
UpperCAmelCase__ : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
UpperCAmelCase__ : Union[str, Any] = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCAmelCase__ : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase__ : Optional[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase__ : Tuple = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
UpperCAmelCase__ : List[Any] = lines[line_index]
if _re_import_struct_add_one.search(_snake_case ) is not None:
objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] )
elif _re_import_struct_add_many.search(_snake_case ) is not None:
UpperCAmelCase__ : Optional[int] = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(', ' )
UpperCAmelCase__ : Tuple = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_between_brackets.search(_snake_case ) is not None:
UpperCAmelCase__ : Optional[Any] = _re_between_brackets.search(_snake_case ).groups()[0].split(', ' )
UpperCAmelCase__ : List[str] = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_quote_object.search(_snake_case ) is not None:
objects.append(_re_quote_object.search(_snake_case ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
UpperCAmelCase__ : int = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCAmelCase__ : Tuple = []
while (
line_index < len(_snake_case )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
UpperCAmelCase__ : str = lines[line_index]
UpperCAmelCase__ : Optional[Any] = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCAmelCase__ : List[Any] = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(_snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCAmelCase__ : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase__ : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase__ : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
UpperCAmelCase__ : List[Any] = lines[line_index]
UpperCAmelCase__ : Tuple = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCAmelCase__ : int = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCamelCase ( _snake_case ,_snake_case ):
def find_duplicates(_snake_case ):
return [k for k, v in collections.Counter(_snake_case ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCAmelCase__ : int = []
for key in import_dict_objects.keys():
UpperCAmelCase__ : Tuple = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
UpperCAmelCase__ : Optional[int] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCAmelCase__ : Any = 'base imports' if key == 'none' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def lowerCamelCase ( ):
UpperCAmelCase__ : Optional[Any] = []
for root, _, files in os.walk(_snake_case ):
if "__init__.py" in files:
UpperCAmelCase__ : List[Any] = os.path.join(_snake_case ,'__init__.py' )
UpperCAmelCase__ : Tuple = parse_init(_snake_case )
if objects is not None:
UpperCAmelCase__ : Any = analyze_results(*_snake_case )
if len(_snake_case ) > 0:
UpperCAmelCase__ : Optional[int] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('\n'.join(_snake_case ) )
if len(_snake_case ) > 0:
raise ValueError('\n\n'.join(_snake_case ) )
def lowerCamelCase ( ):
UpperCAmelCase__ : Union[str, Any] = []
for path, directories, files in os.walk(_snake_case ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(_snake_case )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_snake_case ) / folder).glob('*.py' ) ) ) == 0:
continue
UpperCAmelCase__ : str = str((Path(_snake_case ) / folder).relative_to(_snake_case ) )
UpperCAmelCase__ : Dict = short_path.replace(os.path.sep ,'.' )
submodules.append(_snake_case )
for fname in files:
if fname == "__init__.py":
continue
UpperCAmelCase__ : List[Any] = str((Path(_snake_case ) / fname).relative_to(_snake_case ) )
UpperCAmelCase__ : Union[str, Any] = short_path.replace('.py' ,'' ).replace(os.path.sep ,'.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(_snake_case )
return submodules
UpperCamelCase__ = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def lowerCamelCase ( ):
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ : Tuple = importlib.util.spec_from_file_location(
'transformers' ,os.path.join(_snake_case ,'__init__.py' ) ,submodule_search_locations=[PATH_TO_TRANSFORMERS] ,)
UpperCAmelCase__ : int = spec.loader.load_module()
UpperCAmelCase__ : Tuple = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_snake_case ) > 0:
UpperCAmelCase__ : List[Any] = '\n'.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F'''{list_of_modules}\n'''
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
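# Hedged example (editor's addition): the shape of mismatch the checks above
# flag - an object registered in `_import_structure` but absent from the
# TYPE_CHECKING branch of an __init__.py (names are illustrative):
#
# _import_structure = {"modeling_foo": ["FooModel"]}
# if TYPE_CHECKING:
#     pass  # FooModel missing here -> "FooModel in _import_structure but not in TYPE_HINT."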
| 110
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a ( lowercase , lowercase , unittest.TestCase ):
UpperCamelCase : int = IFImg2ImgSuperResolutionPipeline
UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
UpperCamelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __snake_case ( self ):
return self._get_superresolution_dummy_components()
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith('mps' ):
UpperCAmelCase__ : Optional[int] = torch.manual_seed(UpperCamelCase_ )
else:
UpperCAmelCase__ : int = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
UpperCAmelCase__ : int = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __snake_case ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __snake_case ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __snake_case ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __snake_case ( self ):
self._test_save_load_local()
def __snake_case ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 110
| 1
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def __snake_case ( _UpperCAmelCase=None ):
if subparsers is not None:
__a = subparsers.add_parser('''env''' )
else:
__a = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''' , default=_UpperCAmelCase , help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=_UpperCAmelCase )
return parser
def __snake_case ( _UpperCAmelCase ):
__a = torch.__version__
__a = torch.cuda.is_available()
__a = is_xpu_available()
__a = is_npu_available()
__a = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(_UpperCAmelCase ):
__a = load_config_from_file(args.config_file ).to_dict()
__a = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': f'{pt_version} ({pt_cuda_available})',
'''PyTorch XPU available''': str(_UpperCAmelCase ),
'''PyTorch NPU available''': str(_UpperCAmelCase ),
'''System RAM''': f'{psutil.virtual_memory().total / 1024 ** 3:.2f} GB',
}
if pt_cuda_available:
__a = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([f'- {prop}: {val}' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
__a = (
'''\n'''.join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(_UpperCAmelCase , _UpperCAmelCase )
else f'\t{accelerate_config}'
)
print(_UpperCAmelCase )
__a = accelerate_config
return info
def __snake_case ( ):
__a = env_command_parser()
__a = parser.parse_args()
env_command(_UpperCAmelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
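# Hedged usage note (editor's addition): the parser above backs the
# `accelerate env` CLI entry point, e.g.
#   accelerate env --config_file ~/.cache/huggingface/accelerate/default_config.yaml
# which prints library versions, platform details, and the active config.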
| 60
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__snake_case :Any = TypeVar('''KT''')
__snake_case :List[str] = TypeVar('''VT''')
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        '''simple docstring'''
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []
    def __repr__(self):
        '''simple docstring'''
        return F'Node({self.key}: {self.value})'
    @property
    def level(self):
        '''simple docstring'''
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        '''simple docstring'''
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self):
        '''simple docstring'''
        items = list(self)
        if len(items) == 0:
            return F'SkipList(level={self.level})'
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F'[{node.key}]'.ljust(label_size, '''-''') + '''* ''' * len(forwards))
        lines.append(''' ''' * label_size + '''| ''' * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                F'[{node.key}]'.ljust(label_size, '''-''')
                + ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards))
            lines.append(''' ''' * label_size + '''| ''' * len(forwards))
            forwards = node.forward
        lines.append('''None'''.ljust(label_size) + '''* ''' * len(forwards))
        return F'SkipList(level={self.level})\n' + "\n".join(lines)
    def __iter__(self):
        '''simple docstring'''
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level(self):
        '''simple docstring'''
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key):
        '''simple docstring'''
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - when the node's level is lower than `i`, drop down a level.
            # node.forward[i].key < key - jumping to a node whose key is greater
            #                             than or equal to the searched key would
            #                             skip over the searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT):
        '''simple docstring'''
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert(self, key: KT, value: VT):
        '''simple docstring'''
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node
    def find(self, key: VT):
        '''simple docstring'''
        node, update_vector = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 3 )
    skip_list.insert('''Key2''' , 12 )
    skip_list.insert('''Key3''' , 41 )
    skip_list.insert('''Key4''' , -19 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 10 )
    skip_list.insert('''Key1''' , 12 )
    skip_list.insert('''Key5''' , 7 )
    skip_list.insert('''Key7''' , 10 )
    skip_list.insert('''Key10''' , 5 )
    skip_list.insert('''Key7''' , 7 )
    skip_list.insert('''Key5''' , 5 )
    skip_list.insert('''Key10''' , 10 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find('''Some key''' ) is None
def test_search():
    skip_list = SkipList()
    skip_list.insert('''Key2''' , 20 )
    assert skip_list.find('''Key2''' ) == 20
    skip_list.insert('''Some Key''' , 10 )
    skip_list.insert('''Key2''' , 8 )
    skip_list.insert('''V''' , 13 )
    assert skip_list.find('''Y''' ) is None
    assert skip_list.find('''Key2''' ) == 8
    assert skip_list.find('''Some Key''' ) == 10
    assert skip_list.find('''V''' ) == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete('''Some key''' )
    assert len(skip_list.head.forward ) == 0
def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 12 )
    skip_list.insert('''V''' , 13 )
    skip_list.insert('''X''' , 14 )
    skip_list.insert('''Key2''' , 15 )
    skip_list.delete('''V''' )
    skip_list.delete('''Key2''' )
    assert skip_list.find('''V''' ) is None
    assert skip_list.find('''Key2''' ) is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 12 )
    skip_list.insert('''V''' , 13 )
    skip_list.insert('''X''' , 14 )
    skip_list.insert('''Key2''' , 15 )
    skip_list.delete('''V''' )
    assert skip_list.find('''V''' ) is None
    assert skip_list.find('''X''' ) == 14
    assert skip_list.find('''Key1''' ) == 12
    assert skip_list.find('''Key2''' ) == 15
    skip_list.delete('''X''' )
    assert skip_list.find('''V''' ) is None
    assert skip_list.find('''X''' ) is None
    assert skip_list.find('''Key1''' ) == 12
    assert skip_list.find('''Key2''' ) == 15
    skip_list.delete('''Key1''' )
    assert skip_list.find('''V''' ) is None
    assert skip_list.find('''X''' ) is None
    assert skip_list.find('''Key1''' ) is None
    assert skip_list.find('''Key2''' ) == 15
    skip_list.delete('''Key2''' )
    assert skip_list.find('''V''' ) is None
    assert skip_list.find('''X''' ) is None
    assert skip_list.find('''Key1''' ) is None
    assert skip_list.find('''Key2''' ) is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 12 )
    skip_list.insert('''V''' , 13 )
    skip_list.insert('''X''' , 142 )
    skip_list.insert('''Key2''' , 15 )
    skip_list.delete('''X''' )
    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)
    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))
    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def main():
    skip_list = SkipList()
    skip_list.insert(2 , '''2''' )
    skip_list.insert(4 , '''4''' )
    skip_list.insert(6 , '''4''' )
    skip_list.insert(4 , '''5''' )
    skip_list.insert(8 , '''4''' )
    skip_list.insert(9 , '''4''' )
    skip_list.delete(4 )
    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
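The test battery above is repeated 100 times because node heights are random. The geometric level distribution implied by `random_level` (each additional level occurs with probability p) can be seen directly with a small standalone simulation; the constants below are illustrative and not part of the module:

from collections import Counter
from random import random

def random_level(p: float = 0.5, max_level: int = 16) -> int:
    level = 1
    while random() < p and level < max_level:
        level += 1
    return level

counts = Counter(random_level() for _ in range(100_000))
for level in sorted(counts):
    # Expect roughly half as many nodes at each successive level.
    print(level, counts[level])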
| 60
| 1
|
def _lowerCamelCase ( __lowerCamelCase = 100_0000 ) -> int:
'''simple docstring'''
UpperCAmelCase__ : Tuple = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , __lowerCamelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
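The sieve above fills phi[i] for every i up to the limit in O(n log log n). For small limits, a brute-force totient based on gcd counting can cross-check it; this is a validation sketch only, far too slow for the real limit:

from math import gcd

def phi_naive(n: int) -> int:
    # Count 1 <= k <= n with gcd(k, n) == 1.
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

def solution_naive(limit: int = 100) -> int:
    return sum(phi_naive(n) for n in range(2, limit + 1))

assert solution_naive(10) == 31  # phi(2..10) = 1+2+2+4+2+6+4+6+4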
| 79
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : List[Any] = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='''cpu''' )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='''cpu''' )['''model''']
    # pop unnecessary weights
    keys_to_delete = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
            k_name = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
            v_name = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused QKV weight in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    sd = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(sd)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
lowerCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
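The heart of the key remapping is splitting a fused QKV projection into three equal slices along dim 0. A minimal sketch with a dummy tensor (the hidden size is illustrative):

import torch

hidden = 4
qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
depth = qkv_weight.shape[0]
assert depth % 3 == 0
# Per the comment above, the fused weight is laid out K, V, Q.
k, v, q = torch.split(qkv_weight, depth // 3, dim=0)
assert q.shape == k.shape == v.shape == (hidden, hidden)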
| 527
| 0
|
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
snake_case_ : Optional[int] = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
snake_case_ : int = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
snake_case_ : str = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any ) -> Tuple:
def remove_articles(SCREAMING_SNAKE_CASE__ : Optional[int] ):
UpperCAmelCase_ : Any = re.compile(R'''\b(a|an|the)\b''', re.UNICODE )
return re.sub(SCREAMING_SNAKE_CASE__, ''' ''', SCREAMING_SNAKE_CASE__ )
def white_space_fix(SCREAMING_SNAKE_CASE__ : str ):
return " ".join(text.split() )
def remove_punc(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
UpperCAmelCase_ : int = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(SCREAMING_SNAKE_CASE__ : List[str] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE__ ) ) ) )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]:
return int(normalize_answer(SCREAMING_SNAKE_CASE__ ) == normalize_answer(SCREAMING_SNAKE_CASE__ ) )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
UpperCAmelCase_ : Tuple = [any(compute_exact(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) for ref in refs ) for pred, refs in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )]
return (sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ )) * 100
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = [rgram for rgrams in rgramslist for rgram in rgrams]
UpperCAmelCase_ : Any = Counter(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[Any] = Counter(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[Any] = Counter()
for sgram, scount in sgramcounter.items():
UpperCAmelCase_ : Optional[Any] = scount * numref
UpperCAmelCase_ : List[str] = Counter(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Union[str, Any] = Counter()
for cgram, ccount in cgramcounter.items():
UpperCAmelCase_ : Any = ccount * numref
# KEEP
UpperCAmelCase_ : List[str] = sgramcounter_rep & cgramcounter_rep
UpperCAmelCase_ : Tuple = keepgramcounter_rep & rgramcounter
UpperCAmelCase_ : Any = sgramcounter_rep & rgramcounter
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : Tuple = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Optional[int] = 1
if len(SCREAMING_SNAKE_CASE__ ) > 0:
UpperCAmelCase_ : Optional[Any] = keeptmpscorea / len(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
UpperCAmelCase_ : List[Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
UpperCAmelCase_ : List[Any] = 0
if keepscore_precision > 0 or keepscore_recall > 0:
UpperCAmelCase_ : List[Any] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
UpperCAmelCase_ : List[Any] = sgramcounter_rep - cgramcounter_rep
UpperCAmelCase_ : Optional[Any] = delgramcounter_rep - rgramcounter
UpperCAmelCase_ : List[str] = sgramcounter_rep - rgramcounter
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Tuple = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase_ : Union[str, Any] = 1
if len(SCREAMING_SNAKE_CASE__ ) > 0:
UpperCAmelCase_ : Union[str, Any] = deltmpscorea / len(SCREAMING_SNAKE_CASE__ )
# ADDITION
UpperCAmelCase_ : str = set(SCREAMING_SNAKE_CASE__ ) - set(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = set(SCREAMING_SNAKE_CASE__ ) & set(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Optional[int] = set(SCREAMING_SNAKE_CASE__ ) - set(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : Any = 1
if len(SCREAMING_SNAKE_CASE__ ) > 0:
UpperCAmelCase_ : str = addtmpscore / len(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
UpperCAmelCase_ : Optional[Any] = addtmpscore / len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = 0
if addscore_precision > 0 or addscore_recall > 0:
UpperCAmelCase_ : Any = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
UpperCAmelCase_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Tuple = ssent.split(''' ''' )
UpperCAmelCase_ : Tuple = csent.split(''' ''' )
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Union[str, Any] = []
for rsent in rsents:
UpperCAmelCase_ : List[Any] = rsent.split(''' ''' )
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Optional[int] = []
ragramslist.append(SCREAMING_SNAKE_CASE__ )
for i in range(0, len(SCREAMING_SNAKE_CASE__ ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE__ ) - 1:
UpperCAmelCase_ : List[str] = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(SCREAMING_SNAKE_CASE__ )
if i < len(SCREAMING_SNAKE_CASE__ ) - 2:
UpperCAmelCase_ : Any = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(SCREAMING_SNAKE_CASE__ )
if i < len(SCREAMING_SNAKE_CASE__ ) - 3:
UpperCAmelCase_ : Dict = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(SCREAMING_SNAKE_CASE__ )
ragramslist.append(SCREAMING_SNAKE_CASE__ )
ragramslist.append(SCREAMING_SNAKE_CASE__ )
ragramslist.append(SCREAMING_SNAKE_CASE__ )
for i in range(0, len(SCREAMING_SNAKE_CASE__ ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE__ ) - 1:
UpperCAmelCase_ : Optional[Any] = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(SCREAMING_SNAKE_CASE__ )
if i < len(SCREAMING_SNAKE_CASE__ ) - 2:
UpperCAmelCase_ : Optional[int] = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(SCREAMING_SNAKE_CASE__ )
if i < len(SCREAMING_SNAKE_CASE__ ) - 3:
UpperCAmelCase_ : Union[str, Any] = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(SCREAMING_SNAKE_CASE__ )
for i in range(0, len(SCREAMING_SNAKE_CASE__ ) - 1 ):
if i < len(SCREAMING_SNAKE_CASE__ ) - 1:
UpperCAmelCase_ : Any = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(SCREAMING_SNAKE_CASE__ )
if i < len(SCREAMING_SNAKE_CASE__ ) - 2:
UpperCAmelCase_ : int = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(SCREAMING_SNAKE_CASE__ )
if i < len(SCREAMING_SNAKE_CASE__ ) - 3:
UpperCAmelCase_ : str = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(SCREAMING_SNAKE_CASE__ )
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : Dict = SARIngram(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = SARIngram(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : Any = SARIngram(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = SARIngram(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
UpperCAmelCase_ : Optional[int] = sum([delascore, delascore, delascore, delascore] ) / 4
UpperCAmelCase_ : List[str] = sum([addascore, addascore, addascore, addascore] ) / 4
UpperCAmelCase_ : Dict = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : bool = True, SCREAMING_SNAKE_CASE__ : str = "13a", SCREAMING_SNAKE_CASE__ : bool = True ) -> Optional[Any]:
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
UpperCAmelCase_ : Dict = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
UpperCAmelCase_ : Optional[Any] = sacrebleu.metrics.bleu._get_tokenizer(SCREAMING_SNAKE_CASE__ )()(SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase_ : Union[str, Any] = sacrebleu.TOKENIZERS[tokenizer]()(SCREAMING_SNAKE_CASE__ )
elif tokenizer == "moses":
UpperCAmelCase_ : Optional[int] = sacremoses.MosesTokenizer().tokenize(SCREAMING_SNAKE_CASE__, return_str=SCREAMING_SNAKE_CASE__, escape=SCREAMING_SNAKE_CASE__ )
elif tokenizer == "penn":
UpperCAmelCase_ : int = sacremoses.MosesTokenizer().penn_tokenize(SCREAMING_SNAKE_CASE__, return_str=SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase_ : List[Any] = sentence
if not return_str:
UpperCAmelCase_ : Any = normalized_sent.split()
return normalized_sent
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict:
if not (len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
UpperCAmelCase_ : Union[str, Any] = 0
for src, pred, refs in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
sari_score += SARIsent(normalize(SCREAMING_SNAKE_CASE__ ), normalize(SCREAMING_SNAKE_CASE__ ), [normalize(SCREAMING_SNAKE_CASE__ ) for sent in refs] )
UpperCAmelCase_ : Optional[int] = sari_score / len(SCREAMING_SNAKE_CASE__ )
return 100 * sari_score
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Optional[int]="exp", SCREAMING_SNAKE_CASE__ : List[str]=None, SCREAMING_SNAKE_CASE__ : Optional[int]=False, SCREAMING_SNAKE_CASE__ : List[str]=False, SCREAMING_SNAKE_CASE__ : Optional[int]=False, ) -> str:
UpperCAmelCase_ : Any = len(references[0] )
if any(len(SCREAMING_SNAKE_CASE__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
UpperCAmelCase_ : int = [[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE__ )]
UpperCAmelCase_ : int = sacrebleu.corpus_bleu(
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, smooth_method=SCREAMING_SNAKE_CASE__, smooth_value=SCREAMING_SNAKE_CASE__, force=SCREAMING_SNAKE_CASE__, lowercase=SCREAMING_SNAKE_CASE__, use_effective_order=SCREAMING_SNAKE_CASE__, )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a (datasets.Metric ):
def UpperCAmelCase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = {}
result.update({'''sari''': compute_sari(sources=__magic_name__ , predictions=__magic_name__ , references=__magic_name__ )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=__magic_name__ , references=__magic_name__ )} )
result.update({'''exact''': compute_em(predictions=__magic_name__ , references=__magic_name__ )} )
return result
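SARIsent above builds 1- to 4-gram lists by concatenating adjacent tokens with spaces. A compact equivalent for a single sentence, assuming plain whitespace tokenization (the original builds the four lists incrementally inside the loops, so this is a simplified sketch, not the metric's exact code path):

def ngrams_up_to_4(sentence: str) -> list[list[str]]:
    tokens = sentence.split(" ")
    grams = []
    for n in range(1, 5):
        # All contiguous n-token windows, joined with single spaces.
        grams.append([" ".join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)])
    return grams

unigrams, bigrams, trigrams, fourgrams = ngrams_up_to_4("about 95 species are accepted")
assert bigrams[0] == "about 95"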
| 644
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __a (lowerCamelCase , unittest.TestCase ):
__a : List[str] = BlenderbotSmallTokenizer
__a : List[Any] = False
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = '''adapt act apte'''
UpperCAmelCase_ : Tuple = '''adapt act apte'''
return input_text, output_text
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : List[Any] = '''adapt act apte'''
UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [13_84]
UpperCAmelCase_ : Optional[int] = '''I am a small frog.'''
UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
UpperCAmelCase_ : List[Any] = '''I am a small frog .'''
UpperCAmelCase_ : Any = '''.'''
UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids''']
UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
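The fixture's `ap@@` token illustrates the `@@` continuation convention used by this tokenizer family: a trailing `@@` means "glue the next subword on without a space". A toy detokenizer written against that convention (a sketch, not the library's own decode path):

def detokenize(tokens: list[str]) -> str:
    out = ""
    for tok in tokens:
        if tok.endswith("@@"):
            out += tok[:-2]
        else:
            out += tok + " "
    return out.strip()

assert detokenize(["adapt", "act", "ap@@", "te"]) == "adapt act apte"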
| 644
| 1
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """simple docstring"""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """simple docstring"""
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
'''simple docstring'''
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2E-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1E-2, eps=(1E-30, 1E-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
'''simple docstring'''
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # scheduler dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1E-2, msg=f'''failed for {scheduler_func} in normal scheduler''', )
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f'''failed for {scheduler_func} in save and reload''')
class LambdaScheduleWrapper:
    '''simple docstring'''
    def __init__(self, fn):
        self.fn = fn
    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)
    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
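The save/reload helper above checkpoints a scheduler mid-run through its state_dict. The same round-trip works on a plain torch scheduler; a self-contained sketch with illustrative hyperparameters:

import os
import tempfile

import torch
from torch import nn

model = nn.Linear(4, 4)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.5)
for _ in range(3):
    opt.step()
    sched.step()
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "schedule.bin")
    torch.save(sched.state_dict(), path)
    sched.load_state_dict(torch.load(path))
assert sched.last_epoch == 3  # step count survives the round-trip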
| 62
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, """html.parser""")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("""div""", attrs={"""data-tn-component""": """organicJob"""}):
        job_title = job.find("""a""", attrs={"""data-tn-element""": """jobTitle"""}).text.strip()
        company_name = job.find("""span""", {"""class""": """company"""}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
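Because the selectors depend on Indeed's live markup, the parsing logic is easier to verify against a static HTML snippet that mimics the expected attributes; this offline check is a sketch, not part of the scraper:

from bs4 import BeautifulSoup

html = (
    '<div data-tn-component="organicJob">'
    '<a data-tn-element="jobTitle"> Android Developer </a>'
    '<span class="company"> Acme Apps </span>'
    "</div>"
)
soup = BeautifulSoup(html, "html.parser")
job = soup.find("div", attrs={"data-tn-component": "organicJob"})
title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
company = job.find("span", {"class": "company"}).text.strip()
assert (title, company) == ("Android Developer", "Acme Apps")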
| 518
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowerCamelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705
|
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """simple docstring"""
    if side_length < 0:
        raise ValueError('''surface_area_cube() only accepts non-negative values''')
    return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """simple docstring"""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('''surface_area_cuboid() only accepts non-negative values''')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError('''surface_area_sphere() only accepts non-negative values''')
    return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError('''surface_area_hemisphere() only accepts non-negative values''')
    return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
    """simple docstring"""
    if radius < 0 or height < 0:
        raise ValueError('''surface_area_cone() only accepts non-negative values''')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """simple docstring"""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            '''surface_area_conical_frustum() only accepts non-negative values''')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
    """simple docstring"""
    if radius < 0 or height < 0:
        raise ValueError('''surface_area_cylinder() only accepts non-negative values''')
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """simple docstring"""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('''surface_area_torus() only accepts non-negative values''')
    if torus_radius < tube_radius:
        raise ValueError(
            '''surface_area_torus() does not support spindle or self intersecting tori''')
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    """simple docstring"""
    if length < 0 or width < 0:
        raise ValueError('''area_rectangle() only accepts non-negative values''')
    return length * width
def area_square(side_length: float) -> float:
    """simple docstring"""
    if side_length < 0:
        raise ValueError('''area_square() only accepts non-negative values''')
    return side_length**2
def area_triangle(base: float, height: float) -> float:
    """simple docstring"""
    if base < 0 or height < 0:
        raise ValueError('''area_triangle() only accepts non-negative values''')
    return (base * height) / 2
def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """simple docstring"""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('''area_triangle_three_sides() only accepts non-negative values''')
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('''Given three sides do not form a triangle''')
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area
def area_parallelogram(base: float, height: float) -> float:
    """simple docstring"""
    if base < 0 or height < 0:
        raise ValueError('''area_parallelogram() only accepts non-negative values''')
    return base * height
def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """simple docstring"""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('''area_trapezium() only accepts non-negative values''')
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError('''area_circle() only accepts non-negative values''')
    return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
    """simple docstring"""
    if radius_x < 0 or radius_y < 0:
        raise ValueError('''area_ellipse() only accepts non-negative values''')
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """simple docstring"""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('''area_rhombus() only accepts non-negative values''')
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    """simple docstring"""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            '''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''')
    elif length < 0:
        raise ValueError(
            '''area_reg_polygon() only accepts non-negative values as \
length of a side''')
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F'''Rectangle: {area_rectangle(10, 20) = }''')
print(F'''Square: {area_square(10) = }''')
print(F'''Triangle: {area_triangle(10, 10) = }''')
print(F'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(F'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(F'''Rhombus: {area_rhombus(10, 20) = }''')
print(F'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(F'''Circle: {area_circle(20) = }''')
print(F'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(F'''Cube: {surface_area_cube(20) = }''')
print(F'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(F'''Sphere: {surface_area_sphere(20) = }''')
print(F'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(F'''Cone: {surface_area_cone(10, 20) = }''')
print(F'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(F'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(F'''Torus: {surface_area_torus(20, 10) = }''')
print(F'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(F'''Square: {area_reg_polygon(4, 10) = }''')
    print(F'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
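As a quick consistency check between the two triangle formulas above: for a 3-4-5 right triangle, Heron's formula and base * height / 2 must agree.

assert area_triangle(3, 4) == 6.0
assert abs(area_triangle_three_sides(3, 4, 5) - 6.0) < 1e-9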
| 307
| 0
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
def lowerCAmelCase (self : Union[str, Any] ):
__a : Union[str, Any] = tempfile.mkdtemp()
__a : List[Any] = 8
# DPR tok
__a : str = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__a : Optional[Any] = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__a : int = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__a : str = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__a : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__a : Optional[int] = {'''unk_token''': '''<unk>'''}
__a : Dict = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
__a : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__a : str = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def lowerCAmelCase (self : Union[str, Any] ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCAmelCase (self : List[Any] ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def lowerCAmelCase (self : Optional[Any] ):
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def lowerCAmelCase (self : List[str] ):
__a : int = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
__a : str = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
__a : int = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(__SCREAMING_SNAKE_CASE )
rag_tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
__a : Optional[int] = RagTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE )
self.assertIsInstance(new_rag_tokenizer.question_encoder , __SCREAMING_SNAKE_CASE )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , __SCREAMING_SNAKE_CASE )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def lowerCAmelCase (self : Optional[int] ):
__a : List[Any] = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
__a : List[Any] = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
__a : Dict = tokenizer(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase (self : List[Any] ):
__a : str = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
__a : List[Any] = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
__a : Union[str, Any] = tokenizer(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 521
|
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
__lowerCAmelCase = logging.getLogger(__name__)
__lowerCAmelCase = {'facebook/bart-base': BartForConditionalGeneration}
__lowerCAmelCase = {'facebook/bart-base': BartTokenizer}
def _UpperCAmelCase ( ):
a_ : int = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=__A , default=__A , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=__A , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=__A , default=__A , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=__A , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__A , )
parser.add_argument(
'''--config_name''' , type=__A , default=__A , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=__A , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=__A , default=__A , help='''Where to store the final ONNX file.''' )
a_ : Tuple = parser.parse_args()
return args
def _UpperCAmelCase ( __A : List[str] , __A : Dict="cpu" ):
a_ : str = model_dict[model_name].from_pretrained(__A ).to(__A )
a_ : str = tokenizer_dict[model_name].from_pretrained(__A )
if model_name in ["facebook/bart-base"]:
a_ : str = 0
a_ : Any = None
a_ : int = 0
return huggingface_model, tokenizer
def _UpperCAmelCase ( __A : int , __A : Dict , __A : str , __A : Optional[Any] , __A : Any ):
model.eval()
a_ : Union[str, Any] = None
a_ : str = torch.jit.script(BARTBeamSearchGenerator(__A ) )
with torch.no_grad():
a_ : Any = '''My friends are cool but they eat too many carbs.'''
a_ : Dict = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=10_24 , return_tensors='''pt''' ).to(model.device )
a_ : Dict = model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=__A , max_length=__A , early_stopping=__A , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
__A , (
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , __A , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} , example_outputs=__A , )
logger.info('''Model exported to {}'''.format(__A ) )
a_ : Any = remove_dup_initializers(os.path.abspath(__A ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(__A ) )
a_ : int = onnxruntime.InferenceSession(__A )
a_ : Optional[Any] = ort_sess.run(
__A , {
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(__A ),
'''max_length''': np.array(__A ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
def _UpperCAmelCase ( ):
a_ : int = parse_args()
a_ : List[str] = 5
a_ : List[str] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
a_ : Any = torch.device(args.device )
a_ , a_ : Optional[Any] = load_model_tokenizer(args.model_name_or_path , __A )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(__A )
if args.max_length:
a_ : Dict = args.max_length
if args.num_beams:
a_ : str = args.num_beams
if args.output_file_path:
a_ : str = args.output_file_path
else:
a_ : Union[str, Any] = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(__A , __A , __A , __A , __A )
if __name__ == "__main__":
main()
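The export-then-validate pattern above (torch.onnx.export, an onnxruntime InferenceSession, then an np.testing comparison) works the same for a tiny model; a self-contained sketch with illustrative shapes and file name:

import numpy as np
import onnxruntime
import torch
from torch import nn

model = nn.Linear(3, 2).eval()
x = torch.randn(1, 3)
torch.onnx.export(model, (x,), "tiny.onnx", input_names=["x"], output_names=["y"])
sess = onnxruntime.InferenceSession("tiny.onnx", providers=["CPUExecutionProvider"])
(ort_y,) = sess.run(None, {"x": x.numpy()})
# Outputs from torch and ONNX Runtime should match to numerical tolerance.
np.testing.assert_allclose(model(x).detach().numpy(), ort_y, rtol=1e-3, atol=1e-5)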
| 466
| 0
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak (bell) biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
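# Illustrative usage (a minimal sketch): chain two of the biquads above. This
# assumes IIRFilter exposes a per-sample `process` method, as in the
# accompanying audio_filters.iir_filter module.
def _demo_filters():
    lowpass = make_lowpass(1_000, 48_000)  # 1 kHz cutoff at a 48 kHz sample rate
    shelf = make_highshelf(8_000, 48_000, gain_db=-6.0)
    return [shelf.process(lowpass.process(sample)) for sample in (0.0, 1.0, 0.5, -0.5)]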
| 701
|
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
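    # Illustrative usage (a minimal sketch): shortest path through a small
    # binary grid, where 1 marks a walkable cell.
    demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
    demo_dist, demo_path = dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=False)
    print(demo_dist, demo_path)  # 4.0 and the cells along one shortest path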
| 162
| 0
|
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = """\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"""
_DESCRIPTION = """\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around the Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"""
_KWARGS_DESCRIPTION = """\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
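# Illustrative usage (a minimal sketch; instantiating the metric class directly
# instead of going through `datasets.load_metric`):
def _demo_rouge():
    metric = Rouge()
    return metric.compute(
        predictions=["hello there"], references=["hello there"], use_aggregator=False
    )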
| 388
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
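# Illustrative usage (a minimal sketch, mirroring the docstring example above):
def _demo_squad():
    metric = Squad()
    predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
    references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
    return metric.compute(predictions=predictions, references=references)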
| 626
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 550
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 550
| 1
|
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"""number_of_steps needs to be positive integer, your input {number_of_steps}"""
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
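    # Illustrative usage (a minimal sketch): there are 3 distinct ways to climb
    # 3 steps taking 1 or 2 steps at a time (1+1+1, 1+2, 2+1).
    print(climb_stairs(3))  # 3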
| 342
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 25_00_01)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 25_00_04)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 25_00_20)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [25_00_26, 25_00_01])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 30_34, 2, 25_00_04]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 25_00_01,
            },
        )
| 342
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
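    # Illustrative usage (a minimal sketch): a root holding 3 coins with two
    # empty children needs two moves to leave one coin on every node.
    demo_root = TreeNode(3, TreeNode(0), TreeNode(0))
    print(distribute_coins(demo_root))  # 2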
| 78
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    audio_file = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(audio_file)
    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 5_27))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 78
| 1
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)
            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)
        # Assert all variables are present
        self.assertEqual(len(variable_names), len(input_vars + output_vars))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        ordered_input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        inputs_args, kept_input_names = ensure_valid_input(FuncContiguousArgs(), tokens, ordered_input_names)
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)
        # Should have exactly the same input names
        self.assertEqual(set(kept_input_names), set(ordered_input_names))
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        inputs_args, kept_input_names = ensure_valid_input(FuncNonContiguousArgs(), tokens, ordered_input_names)
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(kept_input_names), 1)
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(kept_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 624
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
    key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
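    # Example invocation (a sketch; the script file name is hypothetical):
    #   python rename_unet_config_and_weights.py --repo_path ./my-unet-repo --dump_path ./converted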
| 624
| 1
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
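# Illustrative usage (a minimal sketch): check a single pinned dependency and
# surface an install hint if the pin is not satisfied.
def _demo_dep_check():
    dep_version_check("tqdm", hint="pip install -U tqdm")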
| 720
|
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
def A ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase__ )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase__ )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase__ )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE = parser.parse_args([] )
self.assertEqual(
lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
SCREAMING_SNAKE_CASE = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def A ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase__ , type=lowercase__ )
expected.add_argument('--bar' , default=lowercase__ , type=lowercase__ , help='help message' )
expected.add_argument('--baz' , default=lowercase__ , type=lowercase__ )
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase__ )
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase__ )
SCREAMING_SNAKE_CASE = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) )
SCREAMING_SNAKE_CASE = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
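# The expected parser above pins down the shape of OptionalExample; this is a
# hedged reconstruction (field names and defaults inferred from the assertions,
# not the test module's literal definition). On Python >= 3.10 the same fields
# can be written with PEP 604 unions (foo: int | None = None), which is what the
# is_python_no_less_than_3_10 branch adds to dataclass_types.
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class OptionalExampleSketch:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = field(default_factory=list)
    des: Optional[List[int]] = field(default_factory=list)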
def A ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase__ , required=lowercase__ )
expected.add_argument('--required_str' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=lowercase__ , )
self.argparsersEqual(lowercase__ , lowercase__ )
def A ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=lowercase__ , )
expected.add_argument('--opt' , type=lowercase__ , default=lowercase__ )
expected.add_argument('--baz' , default='toto' , type=lowercase__ , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
def A ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
SCREAMING_SNAKE_CASE = parser.parse_dict(lowercase__ )[0]
SCREAMING_SNAKE_CASE = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
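# A hedged reconstruction of BasicExample, inferred from the dict used above.
# parse_dict routes each key to the matching dataclass field, so the round-trip
# compares the parser's output against a direct BasicExample(**args_dict) build.
from dataclasses import dataclass

@dataclass
class BasicExampleSketch:
    foo: int
    bar: float
    baz: str
    flag: bool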
def A ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ )
def A ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = os.path.join(lowercase__ , 'temp_json' )
os.mkdir(lowercase__ )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE = parser.parse_json_file(Path(temp_local_path + '.json' ) )[0]
SCREAMING_SNAKE_CASE = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def A ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
SCREAMING_SNAKE_CASE = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = os.path.join(lowercase__ , 'temp_yaml' )
os.mkdir(lowercase__ )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
SCREAMING_SNAKE_CASE = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
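# parse_json_file and parse_yaml_file mirror each other: both load the file into
# a plain dict and defer to the same dict-to-dataclass path exercised by
# parse_dict above. A minimal sketch of that shared shape (hypothetical helper,
# not the library's own code):
import json

def parse_json_like_sketch(parser, path):
    with open(path) as f:
        data = json.load(f)
    return parser.parse_dict(data)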
def A ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = HfArgumentParser(lowercase__ )
self.assertIsNotNone(lowercase__ )
| 406
| 0
|
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ ( snake_case__ ):
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
    '''simple docstring'''
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.relative_attention = relative_attention
    self.position_biased_input = position_biased_input
    self.pos_att_type = pos_att_type
    self.scope = scope
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Dict:
'''simple docstring'''
config = self.get_config()
config.vocab_size = 3_00
return config
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCamelCase__ : Tuple ) -> List[str]:
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = DebertaModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )[0]
UpperCAmelCase = model(__snake_case , token_type_ids=__snake_case )[0]
UpperCAmelCase = model(__snake_case )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> str:
'''simple docstring'''
UpperCAmelCase = DebertaForMaskedLM(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = DebertaForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = DebertaForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = DebertaForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( snake_case__, snake_case__, unittest.TestCase ):
lowercase : Optional[int] =(
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase : int =(
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : Optional[Any] =True
lowercase : Any =False
lowercase : Dict =False
lowercase : str =False
lowercase : List[str] =False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase = DebertaModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__snake_case )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = DebertaModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
'''simple docstring'''
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = DebertaModel.from_pretrained("microsoft/deberta-base" )
UpperCAmelCase = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase = model(__snake_case , attention_mask=__snake_case )[0]
# compare the actual values for a slice.
UpperCAmelCase = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __snake_case , atol=1e-4 ) , F'{output[:, 1:4, 1:4]}' )
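# The integration check above compares only a small slice of the hidden states
# against hard-coded values, under an absolute tolerance. The same pattern in
# isolation (illustrative tensors, not real DeBERTa outputs):
import torch

output = torch.tensor([[[-0.5986, -0.8055], [1.4484, -0.9348]]])
expected = torch.tensor([[[-0.5986, -0.8055], [1.4484, -0.9349]]])
assert torch.allclose(output, expected, atol=1e-4)  # |diff| <= atol + rtol * |expected| everywhere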
| 323
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = 'deit'
def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , qkv_bias=True , encoder_stride=1_6 , **kwargs , ):
    super().__init__(**kwargs )
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.image_size = image_size
    self.patch_size = patch_size
    self.num_channels = num_channels
    self.qkv_bias = qkv_bias
    self.encoder_stride = encoder_stride
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = version.parse('1.11' )
@property
def a_ ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def a_ ( self ):
return 1E-4
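# A short usage sketch of the two classes above, assuming the standard
# transformers entry points (DeiTConfig / DeiTOnnxConfig are assumed here as the
# public names for the obfuscated classes in this file):
#
#     config = DeiTConfig(image_size=2_2_4, patch_size=1_6)
#     onnx_config = DeiTOnnxConfig(config)
#     onnx_config.inputs  # OrderedDict with one "pixel_values" entry whose
#                         # dynamic axes are batch / num_channels / height / width
#     onnx_config.atol_for_validation  # 1E-4, the tolerance exposed above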
| 550
| 0
|
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
lowercase_ : int = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
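# For the binary case the coefficient has a closed form over the confusion
# matrix entries (TP, TN, FP, FN):
#
#     MCC = (TP * TN - FP * FN) / sqrt((TP + FP)(TP + FN)(TN + FP)(TN + FN))
#
# which makes the [-1, +1] range and the 0-for-random-prediction behaviour
# described above explicit; sklearn generalizes this to the multiclass case.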
lowercase_ : int = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
lowercase_ : Any = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
] , )
def __SCREAMING_SNAKE_CASE ( self , predictions , references , sample_weight=None ):
    """simple docstring"""
    return {
        "matthews_correlation": float(matthews_corrcoef(references , predictions , sample_weight=sample_weight ) ),
    }
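# A quick self-contained sanity check of the wrapped computation (plain sklearn,
# no datasets.load_metric needed), reproducing Example 1 from the docstring:
from sklearn.metrics import matthews_corrcoef

refs, preds = [1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]
print(round(float(matthews_corrcoef(refs, preds)), 2))  # 0.54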
| 712
|
"""simple docstring"""
def _print_dist(dist, v) -> None:
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()
def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))
    graph = [[float('inf') for i in range(v)] for j in range(v)]
    for i in range(v):
        # the distance from a vertex to itself is zero
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
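# A non-interactive usage sketch of floyd_warshall on the 3-vertex example from
# the comments above (vertex indices here are 0-based):
INF = float("inf")
example_graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],  # edge 1 -> 2 with weight 2
    [INF, 1.0, 0.0],  # edge 2 -> 1 with weight 1
]
dist, _ = floyd_warshall(example_graph, 3)  # prints the matrix shown above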
| 295
| 0
|