| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
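# A minimal usage sketch for the helper above (the directory name is a
# hypothetical placeholder): each example script writes its final metrics to
# <output_dir>/all_results.json, and get_results simply loads that dict.
#   metrics = get_results("/tmp/glue_run")  # e.g. {"eval_accuracy": 0.80, ...}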
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''')
run_command(self._launch_args + testargs)
__UpperCamelCase :Optional[Any] = get_results(__lowercase)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75)
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''epoch_0''')))
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''glue_no_trainer''')))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase :List[str] = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs)
__UpperCamelCase :Dict = get_results(__lowercase)
self.assertLess(result['''perplexity'''] , 100)
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''epoch_0''')))
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''clm_no_trainer''')))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase :List[Any] = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs)
__UpperCamelCase :Union[str, Any] = get_results(__lowercase)
self.assertLess(result['''perplexity'''] , 42)
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''epoch_0''')))
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''mlm_no_trainer''')))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
def UpperCamelCase__ ( self) -> Tuple:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__UpperCamelCase :Union[str, Any] = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase :List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase :List[Any] = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs)
__UpperCamelCase :Optional[Any] = get_results(__lowercase)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75)
self.assertLess(result['''train_loss'''] , 0.5)
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''epoch_0''')))
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''ner_no_trainer''')))
@unittest.skip(reason='''Fix me @muellerzr''')
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Union[str, Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase :int = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs)
__UpperCamelCase :Any = get_results(__lowercase)
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28)
self.assertGreaterEqual(result['''eval_exact'''] , 28)
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''epoch_0''')))
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''qa_no_trainer''')))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :int = self.get_auto_remove_tmp_dir()
__UpperCamelCase :int = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs)
__UpperCamelCase :Dict = get_results(__lowercase)
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8)
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''swag_no_trainer''')))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :str = self.get_auto_remove_tmp_dir()
__UpperCamelCase :Dict = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs)
__UpperCamelCase :Tuple = get_results(__lowercase)
self.assertGreaterEqual(result['''eval_rouge1'''] , 10)
self.assertGreaterEqual(result['''eval_rouge2'''] , 2)
self.assertGreaterEqual(result['''eval_rougeL'''] , 7)
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7)
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''epoch_0''')))
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''summarization_no_trainer''')))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Optional[int] = self.get_auto_remove_tmp_dir()
__UpperCamelCase :Dict = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs)
__UpperCamelCase :Optional[int] = get_results(__lowercase)
self.assertGreaterEqual(result['''eval_bleu'''] , 30)
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''epoch_0''')))
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''translation_no_trainer''')))
@slow
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(__lowercase)
__UpperCamelCase :int = self.get_auto_remove_tmp_dir()
__UpperCamelCase :Any = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs)
__UpperCamelCase :List[str] = get_results(__lowercase)
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :Union[str, Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase :Optional[Any] = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''')
run_command(self._launch_args + testargs)
__UpperCamelCase :Optional[Any] = get_results(__lowercase)
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6)
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''step_1''')))
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''image_classification_no_trainer''')))
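# Each test above shells out through run_command; a rough sketch of what that
# amounts to (simplified, not the exact transformers.testing_utils code):
#   import subprocess
#   subprocess.run(self._launch_args + testargs, check=True)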
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
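# A sketch of the COCO-style detection annotation dict the slow tests above
# feed to the processor (field values here are illustrative, not taken from
# the fixture file):
#   target = {
#       "image_id": 39769,
#       "annotations": [{"bbox": [10.0, 20.0, 30.0, 40.0], "category_id": 17, "area": 1200.0, "iscrowd": 0}],
#   }
#   encoding = DetrImageProcessor()(images=image, annotations=target, return_tensors="pt")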
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
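# These suites are normally driven by pytest; assuming the usual transformers
# repository layout, something like:
#   pytest tests/models/albert/test_modeling_albert.py -k "test_model" -v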
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
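# Assuming this module is wired into the accelerate CLI as usual, the command
# above is invoked from a shell roughly like this (config path illustrative):
#   $ accelerate env --config_file ~/.cache/huggingface/accelerate/default_config.yaml
# which prints the version/platform table assembled in env_command().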
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
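# The _LazyModule indirection above defers the torch-dependent import until an
# attribute is first accessed; a rough usage sketch (import path assumed from
# the file's location inside transformers):
#   from transformers.models.informer import InformerConfig  # triggers the lazy load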
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
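# With n = 4 and k = 2 the backtracking above enumerates every 2-element
# combination of {1, 2, 3, 4} in lexicographic order, so the demo prints:
#   1 2
#   1 3
#   1 4
#   2 3
#   2 4
#   3 4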
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
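# A minimal sketch of exercising the processor under test directly (the input
# array is arbitrary illustrative data; defaults assumed from this test file):
#   import numpy as np
#   from transformers import PoolFormerImageProcessor
#   processor = PoolFormerImageProcessor(size={"shortest_edge": 30}, crop_size={"height": 30, "width": 30})
#   pixel_values = processor(np.zeros((3, 64, 64), dtype=np.uint8), return_tensors="pt").pixel_values
#   print(pixel_values.shape)  # expected torch.Size([1, 3, 30, 30])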
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
def _lowerCamelCase ( self : Tuple , a : PreTrainedTokenizer , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = super(a , self ).generate_dummy_inputs(
a , batch_size=a , seq_length=a , is_pair=a , framework=a )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase__ : Optional[int] = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ : int = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowerCAmelCase__ : Optional[int] = seqlen + 2
lowerCAmelCase__ : Tuple = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase__ : Tuple = [
(torch.zeros(a ), torch.zeros(a )) for _ in range(self.num_layers )
]
lowerCAmelCase__ : Any = common_inputs['attention_mask']
if self.use_past:
lowerCAmelCase__ : List[str] = ordered_inputs['attention_mask'].dtype
lowerCAmelCase__ : Optional[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(a , a , dtype=a )] , dim=1 )
return ordered_inputs
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return 13
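# Usage sketch (illustrative, not part of the original file): building dummy
# ONNX export inputs from a small made-up config. Run with `transformers`
# imports when used outside the package; only the tokenizer is downloaded.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    config = GPTJConfig(n_layer=2, n_head=4, n_embd=64)
    onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
    print(list(dummy.keys()))  # ['input_ids', 'past_key_values', 'attention_mask']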
| 212
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 358
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder"
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
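# Usage sketch (illustrative, not part of the original file): classifying a
# random NCHW tensor with the checkpoint named in the docstrings above.
# Loading may need `from_pt=True` if only PyTorch weights are published.
if __name__ == "__main__":
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    pixel_values = tf.random.uniform((1, 3, 224, 224))
    logits = model(pixel_values).logits
    print(logits.shape)  # (1, 1000) for the ImageNet head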
| 322
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    r"""Construct a PEGASUS tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
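# Usage sketch (illustrative, not part of the original file): round-tripping a
# sentence with the pretrained checkpoint named in the vocab map above. When
# run outside the package, import the class from `transformers` instead.
if __name__ == "__main__":
    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    ids = tokenizer("PEGASUS uses gap-sentence pretraining.").input_ids
    print(tokenizer.decode(ids, skip_special_tokens=True))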
| 19
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    output_mode: str
    features: List[SquadFeatures]
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
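# Usage sketch (illustrative, not part of the original file): feeding the
# dataset to a PyTorch DataLoader. "./squad_data" is a placeholder directory
# holding the SQuAD .json files; run with `transformers` imports outside the package.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    train_dataset = SquadDataset(data_args, tokenizer, mode="train")
    loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
    print(next(iter(loader))["input_ids"].shape)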
| 19
| 1
|
"""simple docstring"""
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sorts array[start:end] in place by insertion."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sifts array[index] down so the subtree rooted at index is a max heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    """Sorts the array in place with heap sort."""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Returns the median of the three indexed elements, used as the pivot."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition of array[low:high] around the pivot value."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    """Sorts the array with introsort (quicksort / heap sort / insertion sort hybrid)."""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """Quicksorts until max_depth is exhausted (then heap sort), finishing small runs with insertion sort."""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
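# Quick sanity check (illustrative, not part of the original file): the hybrid
# sort should agree with Python's built-in sorting on a small list.
_demo_data = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
assert sort(_demo_data.copy()) == sorted(_demo_data)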
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 353
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
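# Typical invocations of the CLI defined above (illustrative):
#
#   accelerate config            # interactive wizard that writes the default config
#   accelerate env               # print environment info for bug reports
#   accelerate launch train.py   # run a script with the saved launch config
#   accelerate test              # sanity-check the current config end to end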
| 30
| 0
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Counts the tilings of a row of the given length that use at least one
    coloured tile of size 2, 3 or 4, with a single tile size per tiling."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][tile_length - 2] + 1
                )

    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(f"""{solution() = }""")
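# Worked example (matches the Project Euler 116 statement): a row of length 5
# admits 7 red (length-2), 3 green (length-3) and 2 blue (length-4) tilings,
# so solution(5) == 7 + 3 + 2 == 12.
assert solution(5) == 12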
| 60
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
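# Usage sketch (illustrative, not part of the original file): this builder is
# what backs `load_dataset("pandas", ...)`. The path below is a placeholder
# for a pickled pandas DataFrame.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("pandas", data_files={"train": "frames/train.pkl"})
    print(dataset["train"])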
| 190
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 336
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
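
# Illustrative usage sketch for the processor exercised above (not part of the
# test class): CLIPProcessor bundles a tokenizer and an image processor behind
# a single call. The checkpoint name and `my_pil_image` below are assumptions
# for demonstration only.
#
#     from transformers import CLIPProcessor
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     batch = processor(text=["a photo of a cat"], images=my_pil_image, return_tensors="pt")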
| 336
| 1
|
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler problem 20)."""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
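    # Quick illustrative checks: 10! = 3628800, whose digits sum to 27, and the
    # well-known digit sum for 100! is 648.
    assert solution(10) == 27
    assert solution(100) == 648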
| 228
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
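
# Consumption sketch (illustrative): with the lazy structure above in place,
# symbols resolve on first attribute access rather than at package-import time,
# e.g.:
#
#     from transformers.models.jukebox import JukeboxConfig  # triggers the lazy load
#     config = JukeboxConfig()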
| 228
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a PoolFormer model."""

    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
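
# Minimal sketch of how the two classes above interact (assumes a working
# `transformers` install; an OnnxConfig subclass is constructed from a model
# config):
#
#     config = PoolFormerConfig()
#     onnx_config = PoolFormerOnnxConfig(config)
#     print(onnx_config.inputs)               # OrderedDict with the 'pixel_values' axes
#     print(onnx_config.atol_for_validation)  # 2e-3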
| 26
|
UpperCAmelCase__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
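
# Typical entry-point sketch for this package (illustrative; `model`,
# `optimizer`, and `dataloader` stand for any torch objects already built):
#
#     accelerator = Accelerator()
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         loss = model(**batch).loss
#         accelerator.backward(loss)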
| 26
| 1
|
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
| 213
|
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. seg_head.aux_head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
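    # Example invocation (illustrative; the script filename and the checkpoint
    # paths below are placeholders, not verified values):
    #
    #     python convert_mlcvnets_to_pytorch.py \
    #         --task imagenet1k_256 \
    #         --orig_checkpoint_path ./mobilevitv2-1.0.pt \
    #         --orig_config_path ./mobilevitv2-1.0.yaml \
    #         --pytorch_dump_folder_path ./mobilevitv2-1.0-hf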
| 213
| 1
|
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """Solve an ODE dy/dx = ode_func(x, y) with Heun's (modified Euler) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor step (explicit Euler), then trapezoidal corrector
        y_get = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
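    # Illustrative check: integrating y' = y from x = 0 to 1 with step 0.01
    # should land close to e = 2.71828...
    y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    assert abs(y[-1] - 2.718281828) < 1e-3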
| 71
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
a = os.path.join(lowerCamelCase_ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 71
| 1
|
"""simple docstring"""
from __future__ import annotations
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
if len(_SCREAMING_SNAKE_CASE ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
lowerCAmelCase__ :List[Any] = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
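    # Illustrative checks: a 3-4-5 triangle closes into a polygon, while a
    # side of 7 cannot be matched by sides 1 and 2.
    assert check_polygon([3, 4, 5])
    assert not check_polygon([1, 2, 7])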
| 293
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """Do-nothing stand-in so this module imports without vision deps."""

        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 293
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pure-Python vector with basic linear-algebra operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    """Return a zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index `pos`."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute scalar * x + y."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a vector of size n with random integer components in [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A simple matrix implementation backed by a list of rows."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactors = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactors)


def square_zero_matrix(n: int) -> Matrix:
    """Return an n x n zero matrix."""
    matrix = [[0] * n for _ in range(n)]
    return Matrix(matrix, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a width x height matrix with random integer entries in [a, b]."""
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
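
# Small illustrative demo of the classes above (pure Python, safe to run):
assert str(Vector([1, 2, 3]) + Vector([1, 1, 1])) == "(2,3,4)"
assert square_zero_matrix(2).determinant() == 0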
| 361
|
'''simple docstring'''
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # normal gradient of the ellipse 4x^2 + y^2 = 100 at (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    # sa = sin(2*alpha), ca = cos(2*alpha) for the reflection formula
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaeneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count internal reflections of the laser beam (Project Euler 144)."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 287
| 0
|
"""simple docstring"""
demo_graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes with BFS."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance between `start` and `target` with BFS."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0

    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )

        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
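    # Illustrative edge case: a node is trivially reachable from itself.
    assert bfs_shortest_path(demo_graph, "G", "G") == ["G"]
    assert bfs_shortest_path_distance(demo_graph, "G", "G") == 0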
| 57
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
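
# Illustrative: thanks to `attribute_map`, the canonical names used elsewhere
# in the library resolve to the CTRL-specific attributes (sketch, assumes the
# surrounding package is importable):
#
#     config = CTRLConfig()
#     assert config.hidden_size == config.n_embd == 1280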
| 318
| 0
|
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # private field holding the default key
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 135
|
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products of primes over all prime partitions of
    number_to_partition."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Find the first value writable as a sum of primes in more than
    `number_unique_partitions` unique ways (Project Euler 77)."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"{solution() = }")
| 135
| 1
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
_A : Optional[int] = LlamaModel(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a , attention_mask=_a )
_A : str = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> List[str]:
_A : Union[str, Any] = True
_A : Any = LlamaModel(_a )
model.to(_a )
model.eval()
_A : int = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_A : Optional[int] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , )
_A : Optional[int] = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> int:
_A : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
_A : Optional[Any] = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Any:
_A : Optional[Any] = True
_A : Tuple = True
_A : str = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
_A : Union[str, Any] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , )
_A : str = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
_A : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
_A : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the next tokens to input_ids and the attention mask
_A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
_A : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
_A : Dict = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["""hidden_states"""][0]
_A : Optional[int] = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["""hidden_states"""][0]
# select random slice
_A : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_A : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
_A : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> int:
_A : List[Any] = self.prepare_config_and_inputs()
_A , _A , _A , _A , _A , _A , _A = config_and_inputs
_A : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_a = (LlamaForCausalLM,) if is_torch_available() else ()
_a = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = False
_a = False
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = LlamaModelTester(self )
_A : Dict = ConfigTester(self , config_class=_a , hidden_size=37 )
def a__ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> Union[str, Any]:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_A : Optional[Any] = type
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> List[Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
_A : List[str] = 3
_A : Optional[Any] = input_dict["""input_ids"""]
_A : Tuple = input_ids.ne(1 ).to(_a )
_A : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_A : Tuple = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : int = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ) -> Tuple:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : int = 3
_A : Union[str, Any] = """single_label_classification"""
_A : Union[str, Any] = input_dict["""input_ids"""]
_A : int = input_ids.ne(1 ).to(_a )
_A : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_A : Optional[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : Union[str, Any] = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ ( self ) -> str:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_A : List[str] = 3
_A : Tuple = """multi_label_classification"""
_A : List[str] = input_dict["""input_ids"""]
_A : Union[str, Any] = input_ids.ne(1 ).to(_a )
_A : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_A : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
_A : Tuple = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def a__ ( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def a__ ( self , _a ) -> Any:
_A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_A : Dict = ids_tensor([1, 10] , config.vocab_size )
_A : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_A : str = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
_A : Dict = original_model(_a ).last_hidden_state
_A : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_A : Optional[int] = {"""type""": scaling_type, """factor""": 10.0}
_A : Optional[Any] = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
_A : Tuple = scaled_model(_a ).last_hidden_state
_A : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a , _a , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
@require_torch
class lowercase ( unittest.TestCase ):
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def a__ ( self ) -> Dict:
_A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : Tuple = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" )
_A : int = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_A : Optional[int] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A : Any = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def a__ ( self ) -> Any:
_A : int = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" )
_A : Dict = model(torch.tensor(_a ) )
# Expected mean on dim = -1
_A : Dict = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def a__ ( self ) -> Optional[Any]:
_A : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : Tuple = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" )
_A : Optional[int] = model(torch.tensor(_a ) )
# Expected mean on dim = -1
_A : Tuple = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
_A : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" )
_A : int = model(torch.tensor(_a ) )
_A : Tuple = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _a , atol=1e-2 , rtol=1e-2 )
# fmt: off
_A : Any = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _a , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def a__ ( self ) -> Any:
_A : Any = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
_A : List[Any] = """Simply put, the theory of relativity states that """
_A : Dict = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
_A : List[str] = tokenizer.encode(_a , return_tensors="""pt""" )
_A : Optional[Any] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=_a )
# greedy generation outputs
_A : Optional[Any] = model.generate(_a , max_new_tokens=64 , top_p=_a , temperature=1 , do_sample=_a )
_A : Union[str, Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=_a )
self.assertEqual(_a , _a )
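# Editorial sketch (added, standalone): the "linear" RoPE scaling exercised by
# the parameterized test above divides the position index by `factor` before
# the rotary angles are computed. The helper below is illustrative, not part of
# the Llama implementation.
def _linear_scaled_rope_angles(position: float, dim: int, factor: float, base: float = 10000.0) -> list[float]:
    scaled_position = position / factor
    return [scaled_position / base ** (2 * (i // 2) / dim) for i in range(dim)]


# With factor=10.0, position 100 reproduces the unscaled angles of position 10:
assert _linear_scaled_rope_angles(100, 8, 10.0) == _linear_scaled_rope_angles(10, 8, 1.0)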
| 26
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class SCREAMING_SNAKE_CASE__ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = 1.0, lowerCamelCase__ = None, ):
super().__init__()
A : Union[str, Any] = initial_learning_rate
A : List[Any] = warmup_steps
A : int = power
A : Optional[int] = decay_schedule_fn
A : int = name
def __call__( self, lowerCamelCase__ ):
with tf.name_scope(self.name or """WarmUp""" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
A : str = tf.cast(lowerCamelCase__, tf.floataa )
A : List[Any] = tf.cast(self.warmup_steps, tf.floataa )
A : Dict = global_step_float / warmup_steps_float
A : Union[str, Any] = self.initial_learning_rate * tf.math.pow(lowerCamelCase__, self.power )
return tf.cond(
global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps ), name=lowerCamelCase__, )
def _lowerCAmelCase ( self ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
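# Editorial sketch (added; values illustrative, keyword names taken from the
# create_optimizer call below): while step < warmup_steps the schedule returns
# (step / warmup_steps) ** power * initial_learning_rate, then defers to
# decay_schedule_fn. For example,
#   warmup = WarmUp(initial_learning_rate=1e-3, decay_schedule_fn=decay, warmup_steps=1_000)
# yields 0.25 ** 1.0 * 1e-3 == 2.5e-4 at step 250, and `decay` takes over from
# step 1_000 onward.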
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 0.9 , _lowerCAmelCase = 0.999 , _lowerCAmelCase = 1e-8 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 1.0 , _lowerCAmelCase = None , ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_lowerCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_lowerCAmelCase , )
if num_warmup_steps:
A : Dict = WarmUp(
initial_learning_rate=_lowerCAmelCase , decay_schedule_fn=_lowerCAmelCase , warmup_steps=_lowerCAmelCase , )
if weight_decay_rate > 0.0:
A : str = AdamWeightDecay(
learning_rate=_lowerCAmelCase , weight_decay_rate=_lowerCAmelCase , beta_a=_lowerCAmelCase , beta_a=_lowerCAmelCase , epsilon=_lowerCAmelCase , clipnorm=_lowerCAmelCase , global_clipnorm=_lowerCAmelCase , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=_lowerCAmelCase , )
else:
A : Optional[int] = tf.keras.optimizers.Adam(
learning_rate=_lowerCAmelCase , beta_a=_lowerCAmelCase , beta_a=_lowerCAmelCase , epsilon=_lowerCAmelCase , clipnorm=_lowerCAmelCase , global_clipnorm=_lowerCAmelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
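# Editorial sketch (assuming the upstream transformers signature, where this
# factory is named create_optimizer; values illustrative):
# optimizer, lr_schedule = create_optimizer(
#     init_lr=2e-5, num_train_steps=10_000, num_warmup_steps=1_000,
#     weight_decay_rate=0.01,
# )
# model.compile(optimizer=optimizer)  # hypothetical Keras usage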
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__ = 0.001, lowerCamelCase__ = 0.9, lowerCamelCase__ = 0.999, lowerCamelCase__ = 1e-7, lowerCamelCase__ = False, lowerCamelCase__ = 0.0, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = "AdamWeightDecay", **lowerCamelCase__, ):
super().__init__(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ )
A : int = weight_decay_rate
A : Any = include_in_weight_decay
A : Dict = exclude_from_weight_decay
@classmethod
def _lowerCAmelCase ( cls, lowerCamelCase__ ):
A : Tuple = {"""WarmUp""": WarmUp}
return super(lowerCamelCase__, cls ).from_config(lowerCamelCase__, custom_objects=lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
super(lowerCamelCase__, self )._prepare_local(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
A : List[str] = tf.constant(
self.weight_decay_rate, name="""adam_weight_decay_rate""" )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Any = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""], use_locking=self._use_locking, )
return tf.no_op()
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=None, **lowerCamelCase__ ):
A , A : Dict = list(zip(*lowerCamelCase__ ) )
return super(lowerCamelCase__, self ).apply_gradients(zip(lowerCamelCase__, lowerCamelCase__ ), name=lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
A : Union[str, Any] = apply_state or {}
A : Optional[int] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
A : Dict = self._fallback_apply_state(lowerCamelCase__, lowerCamelCase__ )
A : List[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=None ):
A , A : str = self._get_lr(var.device, var.dtype.base_dtype, lowerCamelCase__ )
A : Any = self._decay_weights_op(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase__, self )._resource_apply_dense(lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__=None ):
A , A : Tuple = self._get_lr(var.device, var.dtype.base_dtype, lowerCamelCase__ )
A : Optional[Any] = self._decay_weights_op(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase__, self )._resource_apply_sparse(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Dict = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowerCamelCase__, lowerCamelCase__ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowerCamelCase__, lowerCamelCase__ ) is not None:
return False
return True
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self ):
A : List[str] = []
A : List[str] = None
@property
def _lowerCAmelCase ( self ):
if self._accum_steps is None:
A : str = tf.Variable(
tf.constant(0, dtype=tf.intaa ), trainable=lowerCamelCase__, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
return self._accum_steps.value()
@property
def _lowerCAmelCase ( self ):
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self, lowerCamelCase__ ):
if not self._gradients:
A : int = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowerCamelCase__ ), trainable=lowerCamelCase__, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowerCamelCase__ ) != len(self._gradients ):
raise ValueError(f'''Expected {len(self._gradients )} gradients, but got {len(lowerCamelCase__ )}''' )
for accum_gradient, gradient in zip(self._gradients, lowerCamelCase__ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowerCamelCase__ )
self._accum_steps.assign_add(1 )
def _lowerCAmelCase ( self ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowerCamelCase__ ) )
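# Editorial sketch of the accumulation pattern above (assuming the upstream
# name GradientAccumulator with its __call__/gradients/reset members; the
# compute_gradients helper is hypothetical):
# accumulator = GradientAccumulator()
# for micro_batch in micro_batches:
#     accumulator(compute_gradients(model, micro_batch))
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset()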
| 116
| 0
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __magic_name__ (__lowercase ):
lowerCamelCase__ = DistilBertTokenizer
lowerCamelCase__ = DistilBertTokenizerFast
lowerCamelCase__ = True
@slow
def __a ( self ) -> Tuple:
lowerCAmelCase_ = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
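# Editorial note: the assertions above encode the BERT-style special-token layout:
#   single sequence: [CLS] tokens [SEP]
#   sequence pair:   [CLS] tokens_a [SEP] tokens_b [SEP]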
| 362
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasingly sorted
    array via binary search; returns len(array) when there is none."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # num is the first negative when it is negative and its predecessor is not.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers, so return the length of the array.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with a shrinking column bound: once a column turns
    negative in one row, it stays negative in all later rows."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every value."""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, breaking at the first negative value since
    every value after it in the row must also be negative."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
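# Editorial cross-check (added): all three strategies agree on a small grid
# that is sorted in decreasing order along both rows and columns.
_demo_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert (
    count_negatives_binary_search(_demo_grid)
    == count_negatives_brute_force(_demo_grid)
    == count_negatives_brute_force_with_break(_demo_grid)
    == 8
)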
def benchmark() -> None:
    """Benchmark the three counting strategies on the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 22
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_upernet''': ['''UperNetConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_upernet'''] = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 69
|
from __future__ import annotations
from collections.abc import MutableSequence
class lowercase__:
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : MutableSequence[float] ) -> None:
if len(SCREAMING_SNAKE_CASE_ ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = degree
def __add__( self : Any , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial:
if self.degree > polynomial_a.degree:
lowercase_ = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , SCREAMING_SNAKE_CASE_ )
else:
lowercase_ = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , SCREAMING_SNAKE_CASE_ )
def __sub__( self : str , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial:
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : int ) -> Polynomial:
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Any , SCREAMING_SNAKE_CASE_ : Polynomial ) -> Polynomial:
lowercase_ = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : int | float ) -> int | float:
lowercase_ = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Tuple ) -> str:
lowercase_ = ''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(SCREAMING_SNAKE_CASE_ )
return polynomial
def __repr__( self : Optional[Any] ) -> str:
return self.__str__()
def _lowercase ( self : int ) -> Polynomial:
lowercase_ = [0] * self.degree
for i in range(self.degree ):
lowercase_ = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : int | float = 0 ) -> Polynomial:
lowercase_ = [0] * (self.degree + 2)
lowercase_ = constant
for i in range(self.degree + 1 ):
lowercase_ = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , SCREAMING_SNAKE_CASE_ )
def __eq__( self : str , SCREAMING_SNAKE_CASE_ : object ) -> bool:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : List[str] , SCREAMING_SNAKE_CASE_ : object ) -> bool:
return not self.__eq__(SCREAMING_SNAKE_CASE_ )
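# Editorial sketch (assuming the upstream method names evaluate, derivative and
# integral for the three helpers above; values illustrative):
# p = Polynomial(2, [1, 0, 3])   # coefficients in ascending order: 3x^2 + 1
# p.evaluate(2)                  # -> 13
# p.derivative()                 # -> Polynomial(1, [0, 6]), i.e. 6x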
| 30
| 0
|
def max_product_subarray(numbers: list[int]) -> int:
    """Return the largest product of any contiguous subarray of numbers."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('numbers must be an iterable of integers')

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
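# Editorial checks (added): two classic cases for the function above.
assert max_product_subarray([2, 3, -2, 4]) == 6  # best subarray is [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0    # best product is 0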
| 50
| 1
|
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_A = """true"""
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase=82 , __UpperCAmelCase=16 ) -> Optional[Any]:
set_seed(42 )
lowerCAmelCase__ : Optional[int] = RegressionModel()
lowerCAmelCase__ : Optional[int] = deepcopy(__UpperCAmelCase )
lowerCAmelCase__ : str = RegressionDataset(length=__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = DataLoader(__UpperCAmelCase , batch_size=__UpperCAmelCase )
model.to(accelerator.device )
lowerCAmelCase__ , lowerCAmelCase__ : Dict = accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase )
return model, ddp_model, dataloader
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase=False ) -> List[Any]:
lowerCAmelCase__ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
lowerCAmelCase__ : Dict = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(__UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase )
return outputs
with accelerator.main_process_first():
lowerCAmelCase__ : Tuple = dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
lowerCAmelCase__ : Any = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCAmelCase ):
if use_longest:
return tokenizer.pad(__UpperCAmelCase , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(__UpperCAmelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return DataLoader(__UpperCAmelCase , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=16 )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Optional[int] = Accelerator(dispatch_batches=__UpperCAmelCase , split_batches=__UpperCAmelCase )
lowerCAmelCase__ : Any = get_dataloader(__UpperCAmelCase , not dispatch_batches )
lowerCAmelCase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
lowerCAmelCase__ : int = []
for batch in dataloader:
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = batch.values()
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Any = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = [], []
for logit, targ in logits_and_targets:
logits.append(__UpperCAmelCase )
targs.append(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = torch.cat(__UpperCAmelCase ), torch.cat(__UpperCAmelCase )
return logits, targs
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase=82 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=16 ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = get_basic_setup(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = generate_predictions(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
assert (
len(__UpperCAmelCase ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCAmelCase )}"""
def lowercase_ ( __UpperCAmelCase = False , __UpperCAmelCase = False ) -> Union[str, Any]:
lowerCAmelCase__ : str = evaluate.load("""glue""" , """mrpc""" )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = get_mrpc_setup(__UpperCAmelCase , __UpperCAmelCase )
# First do baseline
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = setup["""no"""]
model.to(__UpperCAmelCase )
model.eval()
for batch in dataloader:
batch.to(__UpperCAmelCase )
with torch.inference_mode():
lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase )
lowerCAmelCase__ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__UpperCAmelCase , references=batch["""labels"""] )
lowerCAmelCase__ : Tuple = metric.compute()
# Then do distributed
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowerCAmelCase__ : Dict = model(**__UpperCAmelCase )
lowerCAmelCase__ : int = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ : Optional[int] = batch["""labels"""]
lowerCAmelCase__ , lowerCAmelCase__ : Any = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__UpperCAmelCase , references=__UpperCAmelCase )
lowerCAmelCase__ : List[str] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
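# Editorial note: `gather_for_metrics` all-gathers predictions and references
# across processes and drops the duplicate samples a distributed sampler pads
# onto the final batch, which is why the baseline and distributed metrics above
# are expected to match exactly.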
def lowercase_ ( ) -> Tuple:
lowerCAmelCase__ : Optional[int] = Accelerator(split_batches=__UpperCAmelCase , dispatch_batches=__UpperCAmelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(__UpperCAmelCase , __UpperCAmelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowerCAmelCase__ : Dict = Accelerator(split_batches=__UpperCAmelCase , dispatch_batches=__UpperCAmelCase )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(__UpperCAmelCase , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
lowerCAmelCase__ : Union[str, Any] = Accelerator()
test_torch_metrics(__UpperCAmelCase , 512 )
accelerator.state._reset_state()
def lowercase_ ( __UpperCAmelCase ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 242
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Any = "Salesforce/blip-image-captioning-base"
_lowerCamelCase :int = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
_lowerCamelCase :List[Any] = "image_captioner"
_lowerCamelCase :Tuple = AutoModelForVisionaSeq
_lowerCamelCase :Dict = ["image"]
_lowerCamelCase :str = ["text"]
def __init__( self : Dict , *UpperCamelCase : Any , **UpperCamelCase : Any ) -> Any:
"""simple docstring"""
requires_backends(self , ["""vision"""] )
super().__init__(*UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Any , UpperCamelCase : "Image" ) -> Union[str, Any]:
"""simple docstring"""
return self.pre_processor(images=UpperCamelCase , return_tensors="""pt""" )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : str ) -> Tuple:
"""simple docstring"""
return self.model.generate(**UpperCamelCase )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return self.pre_processor.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )[0].strip()
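# Editorial sketch (assuming the upstream tool class name ImageCaptioningTool;
# the image path and caption are illustrative):
# from PIL import Image
# tool = ImageCaptioningTool()
# tool(Image.open("cats.png"))  # -> e.g. "two cats lying on a couch"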
| 242
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : int = logging.get_logger(__name__)
A : List[str] = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str ="""timesformer"""
def __init__( self , __a=2_24 , __a=16 , __a=3 , __a=8 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.0 , __a=0.0 , __a=0.0_2 , __a=1e-6 , __a=True , __a="divided_space_time" , __a=0 , **__a , ):
super().__init__(**__a )
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = num_frames
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = qkv_bias
__lowerCAmelCase = attention_type
__lowerCAmelCase = drop_path_rate
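# Editorial sketch (illustrative; upstream class name TimesformerConfig):
# config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
# config.num_frames  # -> 16; the remaining fields keep the defaults set above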
| 355
|
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
A : Any = "bert-base-cased"
A : Any = "google/pegasus-xsum"
A : Union[str, Any] = [" Sam ate lunch today.", "Sams lunch ingredients."]
A : Union[str, Any] = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
A : Optional[int] = "patrickvonplaten/t5-tiny-random"
A : int = "sshleifer/bart-tiny-random"
A : Optional[int] = "sshleifer/tiny-mbart"
A : Any = "sshleifer/tiny-marian-en-de"
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = "\n".join(_UpperCamelCase )
Path(_UpperCamelCase ).open("w" ).writelines(_UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(_UpperCamelCase , f"{split}.source" ) , _UpperCamelCase )
_dump_articles(os.path.join(_UpperCamelCase , f"{split}.target" ) , _UpperCamelCase )
return tmp_dir
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def snake_case ( self , __a ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a )
__lowerCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in ARTICLES )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in SUMMARIES )
__lowerCAmelCase = 4
__lowerCAmelCase = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__lowerCAmelCase , __lowerCAmelCase = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=__a , type_path="train" , max_source_length=__a , max_target_length=__a , src_lang=__a , tgt_lang=__a , )
__lowerCAmelCase = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__a , __a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__lowerCAmelCase = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def snake_case ( self , __a ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a )
__lowerCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in ARTICLES )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in SUMMARIES )
__lowerCAmelCase = 4
__lowerCAmelCase = LegacySeqaSeqDataset(
__a , data_dir=__a , type_path="train" , max_source_length=20 , max_target_length=__a , )
__lowerCAmelCase = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def snake_case ( self ):
__lowerCAmelCase = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
__lowerCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__lowerCAmelCase = tmp_dir.joinpath("train.source" ).open().readlines()
__lowerCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__a , __a , 1_28 , __a )
__lowerCAmelCase = {x.name for x in tmp_dir.iterdir()}
__lowerCAmelCase = {x.name for x in save_dir.iterdir()}
__lowerCAmelCase = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__a ) < len(__a )
assert len(__a ) == 1
assert len(packed_examples[0] ) == sum(len(__a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def snake_case ( self ):
if not FAIRSEQ_AVAILABLE:
return
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset(max_len=64 )
__lowerCAmelCase = 64
__lowerCAmelCase = ds.make_dynamic_sampler(__a , required_batch_size_multiple=__a )
__lowerCAmelCase = [len(__a ) for x in batch_sampler]
assert len(set(__a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__a ) == len(__a ) # no dropped or added examples
__lowerCAmelCase = DataLoader(__a , batch_sampler=__a , collate_fn=ds.collate_fn , num_workers=2 )
__lowerCAmelCase = []
__lowerCAmelCase = []
for batch in data_loader:
__lowerCAmelCase = batch["input_ids"].shape
__lowerCAmelCase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__lowerCAmelCase = np.product(batch["input_ids"].shape )
num_src_per_batch.append(__a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__a )
assert num_src_per_batch[0] == max(__a )
if failures:
raise AssertionError(f"too many tokens in {len(__a )} batches" )
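    # Editorial note: the dynamic-batch test above checks three invariants:
    # batch sizes vary (true dynamic batching), no examples are dropped or
    # added, and no batch exceeds roughly 1.1 * max_tokens source tokens.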
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset(max_len=5_12 )
__lowerCAmelCase = 2
__lowerCAmelCase = ds.make_sortish_sampler(__a , shuffle=__a )
__lowerCAmelCase = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2 )
__lowerCAmelCase = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2 , sampler=__a )
__lowerCAmelCase = tokenizer.pad_token_id
def count_pad_tokens(__a , __a="input_ids" ):
return [batch[k].eq(__a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__a , k="labels" ) ) < sum(count_pad_tokens(__a , k="labels" ) )
assert sum(count_pad_tokens(__a ) ) < sum(count_pad_tokens(__a ) )
assert len(__a ) == len(__a )
def snake_case ( self , __a=10_00 , __a=1_28 ):
if os.getenv("USE_REAL_DATA" , __a ):
__lowerCAmelCase = "examples/seq2seq/wmt_en_ro"
__lowerCAmelCase = max_len * 2 * 64
if not Path(__a ).joinpath("train.len" ).exists():
save_len_file(__a , __a )
else:
__lowerCAmelCase = "examples/seq2seq/test_data/wmt_en_ro"
__lowerCAmelCase = max_len * 4
save_len_file(__a , __a )
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a )
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=__a , type_path="train" , max_source_length=__a , max_target_length=__a , n_obs=__a , )
return ds, max_tokens, tokenizer
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset()
__lowerCAmelCase = set(DistributedSortishSampler(__a , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=__a ) )
__lowerCAmelCase = set(DistributedSortishSampler(__a , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=__a ) )
assert idsa.intersection(__a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def snake_case ( self , __a ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a , use_fast=__a )
if tok_name == MBART_TINY:
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
__lowerCAmelCase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
__lowerCAmelCase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__a ) == 1 if tok_name == BART_TINY else len(__a ) == 0
| 259
| 0
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = True
@register_to_config
def __init__( self , _a = 3 , _a = 3 , _a = ("DownEncoderBlock2D",) , _a = ("UpDecoderBlock2D",) , _a = (64,) , _a = 1 , _a = "silu" , _a = 4 , _a = 32 , _a = 32 , _a = 0.18215 , ) -> Any:
super().__init__()
# pass init params to Encoder
_A : Tuple = Encoder(
in_channels=_a , out_channels=_a , down_block_types=_a , block_out_channels=_a , layers_per_block=_a , act_fn=_a , norm_num_groups=_a , double_z=_a , )
# pass init params to Decoder
_A : Union[str, Any] = Decoder(
in_channels=_a , out_channels=_a , up_block_types=_a , block_out_channels=_a , layers_per_block=_a , norm_num_groups=_a , act_fn=_a , )
_A : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
_A : List[str] = nn.Convad(_a , _a , 1 )
_A : List[Any] = False
_A : Any = False
# only relevant if vae tiling is enabled
_A : Any = self.config.sample_size
_A : Union[str, Any] = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
_A : List[str] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
_A : int = 0.25
def a__ ( self , _a , _a=False ) -> str:
if isinstance(_a , (Encoder, Decoder) ):
_A : Dict = value
def a__ ( self , _a = True ) -> int:
_A : int = use_tiling
def a__ ( self ) -> Dict:
self.enable_tiling(_a )
def a__ ( self ) -> Optional[int]:
_A : Tuple = True
def a__ ( self ) -> List[str]:
_A : List[Any] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ) -> Dict[str, AttentionProcessor]:
_A : Dict = {}
def fn_recursive_add_processors(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
_A : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , _a , _a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a , _a , _a )
return processors
def a__ ( self , _a ) -> str:
_A : int = len(self.attn_processors.keys() )
if isinstance(_a , _a ) and len(_a ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_a )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_a , _a , _a ):
if hasattr(_a , """set_processor""" ):
if not isinstance(_a , _a ):
module.set_processor(_a )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , _a , _a )
for name, module in self.named_children():
fn_recursive_attn_processor(_a , _a , _a )
def a__ ( self ) -> Dict:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def a__ ( self , _a , _a = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_a , return_dict=_a )
if self.use_slicing and x.shape[0] > 1:
_A : List[Any] = [self.encoder(_a ) for x_slice in x.split(1 )]
_A : Dict = torch.cat(_a )
else:
_A : Any = self.encoder(_a )
_A : Tuple = self.quant_conv(_a )
_A : Any = DiagonalGaussianDistribution(_a )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_a )
def a__ ( self , _a , _a = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_a , return_dict=_a )
_A : Union[str, Any] = self.post_quant_conv(_a )
_A : int = self.decoder(_a )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_a )
@apply_forward_hook
def a__ ( self , _a , _a = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
_A : Dict = [self._decode(_a ).sample for z_slice in z.split(1 )]
_A : str = torch.cat(_a )
else:
_A : Optional[int] = self._decode(_a ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_a )
def a__ ( self , _a , _a , _a ) -> Any:
_A : int = min(a.shape[2] , b.shape[2] , _a )
for y in range(_a ):
_A : List[Any] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def a__ ( self , _a , _a , _a ) -> Tuple:
_A : Union[str, Any] = min(a.shape[3] , b.shape[3] , _a )
for x in range(_a ):
_A : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
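    # Editorial note: blend_v and blend_h above linearly cross-fade a stripe of
    # `blend_extent` rows/columns between neighbouring tiles, weighting the new
    # tile by t = y / blend_extent and the old one by 1 - t to hide tile seams.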
def a__ ( self , _a , _a = True ) -> AutoencoderKLOutput:
_A : Tuple = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_A : List[Any] = int(self.tile_latent_min_size * self.tile_overlap_factor )
_A : Union[str, Any] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_A : str = []
for i in range(0 , x.shape[2] , _a ):
_A : Dict = []
for j in range(0 , x.shape[3] , _a ):
_A : Optional[int] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_A : Any = self.encoder(_a )
_A : List[str] = self.quant_conv(_a )
row.append(_a )
rows.append(_a )
_A : List[str] = []
for i, row in enumerate(_a ):
_A : Any = []
for j, tile in enumerate(_a ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_A : str = self.blend_v(rows[i - 1][j] , _a , _a )
if j > 0:
_A : Tuple = self.blend_h(row[j - 1] , _a , _a )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_a , dim=3 ) )
_A : Tuple = torch.cat(_a , dim=2 )
_A : Union[str, Any] = DiagonalGaussianDistribution(_a )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_a )
def a__ ( self , _a , _a = True ) -> Union[DecoderOutput, torch.FloatTensor]:
_A : List[str] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_A : List[str] = int(self.tile_sample_min_size * self.tile_overlap_factor )
_A : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_A : Optional[Any] = []
for i in range(0 , z.shape[2] , _a ):
_A : List[str] = []
for j in range(0 , z.shape[3] , _a ):
_A : Any = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_A : List[str] = self.post_quant_conv(_a )
_A : List[Any] = self.decoder(_a )
row.append(_a )
rows.append(_a )
_A : Dict = []
for i, row in enumerate(_a ):
_A : str = []
for j, tile in enumerate(_a ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_A : Optional[Any] = self.blend_v(rows[i - 1][j] , _a , _a )
if j > 0:
_A : Tuple = self.blend_h(row[j - 1] , _a , _a )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_a , dim=3 ) )
_A : Optional[Any] = torch.cat(_a , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_a )
    def forward(self, sample, sample_posterior = False, return_dict = True, generator = None) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
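# A minimal, standalone sketch (illustrative values, not read from any real
# config) of the tile arithmetic and of the linear cross-fade that blend_h
# applies over the overlap region of two neighbouring tiles. It assumes torch
# is imported at the top of this module, as the code above already requires.
if __name__ == "__main__":
    tile_sample_min_size = 512
    tile_latent_min_size = 64
    tile_overlap_factor = 0.25

    overlap_size = int(tile_sample_min_size * (1 - tile_overlap_factor))  # 384: stride between tile origins
    blend_extent = int(tile_latent_min_size * tile_overlap_factor)        # 16: rows/cols that get cross-faded
    row_limit = tile_latent_min_size - blend_extent                       # 48: rows/cols kept from each tile
    print(overlap_size, blend_extent, row_limit)

    # The cross-fade itself: the weight on tile `a` ramps from 1 down to 0
    # while the weight on tile `b` ramps from 0 up to 1.
    a = torch.ones(1, 1, 1, 4)
    b = torch.zeros(1, 1, 1, 4)
    for x in range(4):
        b[:, :, :, x] = a[:, :, :, -4 + x] * (1 - x / 4) + b[:, :, :, x] * (x / 4)
    print(b)  # tensor([[[[1.0000, 0.7500, 0.5000, 0.2500]]]])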
from __future__ import annotations
import numpy as np
def relu(vector):
    # Element-wise max(0, x): negative entries clamp to zero.
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
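    # Illustrative extra check (not from the original file): relu also applies
    # element-wise to multi-dimensional arrays, since np.maximum broadcasts.
    print(relu(np.array([[-2.0, 3.0], [0.5, -0.1]])))  # --> [[0. 3.] [0.5 0.]]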
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    """simple docstring"""

    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
@require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
@slow
@require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
    def test_small_model_tf(self):
"""simple docstring"""
pass
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is usable if no already-colored neighbour uses the same color.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
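# Illustrative usage (not part of the original module): color a 5-vertex graph,
# given as an adjacency matrix, with at most 3 colors.
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(demo_graph, 3))  # e.g. [0, 1, 0, 1, 0]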
'''simple docstring'''
from manim import *
class MyScene(Scene):
    def construct(self):
lowerCamelCase__ : List[Any] = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase__ : int = Rectangle(height=0.25 , width=0.25 )
lowerCamelCase__ : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase__ : List[str] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : List[Any] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : int = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCamelCase__ : Dict = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCamelCase__ : str = VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCamelCase__ : List[str] = Text("""CPU""" , font_size=24 )
lowerCamelCase__ : Dict = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase__ )
lowerCamelCase__ : str = [mem.copy() for i in range(4 )]
lowerCamelCase__ : Dict = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCamelCase__ : Dict = Text("""GPU""" , font_size=24 )
lowerCamelCase__ : Union[str, Any] = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : Tuple = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCamelCase__ : List[Any] = Text("""Model""" , font_size=24 )
lowerCamelCase__ : Any = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = []
lowerCamelCase__ : Tuple = []
lowerCamelCase__ : Union[str, Any] = []
for i, rect in enumerate(UpperCamelCase__ ):
rect.set_stroke(UpperCamelCase__ )
lowerCamelCase__ : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=UpperCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCamelCase__ , buff=0.0 )
self.add(UpperCamelCase__ )
model_cpu_arr.append(UpperCamelCase__ )
self.add(*UpperCamelCase__ , *UpperCamelCase__ , *UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : Any = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCamelCase__ : Dict = Text("""Loaded Checkpoint""" , font_size=24 )
lowerCamelCase__ : Tuple = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(UpperCamelCase__ )
lowerCamelCase__ : Dict = []
lowerCamelCase__ : Dict = []
for i, rect in enumerate(UpperCamelCase__ ):
lowerCamelCase__ : Tuple = fill.copy().set_fill(UpperCamelCase__ , opacity=0.7 )
target.move_to(UpperCamelCase__ )
ckpt_arr.append(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(UpperCamelCase__ )
self.add(*UpperCamelCase__ , *UpperCamelCase__ )
lowerCamelCase__ : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ : int = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(UpperCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCamelCase__ )
lowerCamelCase__ : Tuple = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowerCamelCase__ : int = [meta_mem.copy() for i in range(6 )]
lowerCamelCase__ : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
lowerCamelCase__ : List[str] = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCamelCase__ : Optional[int] = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCamelCase__ : List[str] = VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
lowerCamelCase__ : Any = Text("""Disk""" , font_size=24 )
lowerCamelCase__ : List[str] = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(UpperCamelCase__ , run_time=3 ) , Write(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) )
lowerCamelCase__ : Union[str, Any] = []
for i, rect in enumerate(UpperCamelCase__ ):
lowerCamelCase__ : List[Any] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5 ) )
self.play(*UpperCamelCase__ )
self.play(FadeOut(UpperCamelCase__ ) )
lowerCamelCase__ : Any = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase__ , run_time=3 ) )
self.play(
FadeOut(UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ , *UpperCamelCase__ ) , )
self.wait()
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
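# Illustrative helper (hypothetical, not part of the original script): shows on
# a toy tensor how a fused qkv matrix of shape (3 * hidden, hidden) is sliced
# into equal query/key/value blocks, mirroring the indexing above.
def _demo_qkv_split(hidden=4):
    in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q = in_proj_weight[:hidden, :]              # first third  -> query
    k = in_proj_weight[hidden : hidden * 2, :]  # middle third -> key
    v = in_proj_weight[-hidden:, :]             # last third   -> value
    assert q.shape == k.shape == v.shape == (hidden, hidden)
    return q, k, v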
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
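# A minimal sketch of the lazy-import pattern used above. `DemoLazyModule` is a
# hypothetical stand-in, not transformers' actual _LazyModule: attribute access
# triggers the real submodule import, so `import package` itself stays cheap.
import importlib
import types


class DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        submodule_name = self._symbol_to_module.get(symbol)
        if submodule_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        submodule = importlib.import_module(f"{self.__name__}.{submodule_name}")
        value = getattr(submodule, symbol)
        setattr(self, symbol, value)  # cache, so the import only happens once
        return value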
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    # Curved surface (2*pi*r^2) plus the flat circular base (pi*r^2).
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)).
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \
length of a side')
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
    print(F'Regular Pentagon: {area_reg_polygon(5, 10) = }')
"""simple docstring"""
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # a string is no longer a prize string once it contains two absences
    # or three consecutive lates
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
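    # Illustrative brute-force cross-check (not part of the original solution):
    # enumerate every attendance string over {O, L, A} for a small day count
    # and count the valid ones directly.
    from itertools import product

    def _brute_force(days: int) -> int:
        count = 0
        for attendance in product("OLA", repeat=days):
            string = "".join(attendance)
            if string.count("A") < 2 and "LLL" not in string:
                count += 1
        return count

    assert _brute_force(4) == _calculate(4, absent=0, late=0)  # both give 43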
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
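# Illustrative usage (the classic textbook HMM; the probabilities below are
# assumed demo values, not from the original file): given three symptom
# observations, recover the most likely sequence of health states.
if __name__ == "__main__":
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))
    # expected: ['Healthy', 'Healthy', 'Fever']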
def sum_of_proper_divisors(input_num: int) -> int:
    # Sum of all divisors of input_num that are strictly smaller than input_num.
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
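    # Illustrative checks (not from the original file): a perfect number equals
    # the sum of its proper divisors, e.g. 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14.
    assert sum_of_proper_divisors(6) == 6
    assert sum_of_proper_divisors(28) == 28
    assert sum_of_proper_divisors(10) == 8  # 1 + 2 + 5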
import mpmath # for roots of unity
import numpy as np
class FFT:
    """simple docstring"""

    def __init__(self, poly_a=None, poly_b=None) -> None:
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self) -> str:
        a = 'A = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = 'B = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = 'A*B = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
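    # Illustrative usage (assumes the cleaned-up class above): multiply
    # (1 + 2x) by (3 + 4x); the product should be 3 + 10x + 8x^2.
    demo = FFT(poly_a=[1, 2], poly_b=[3, 4])
    print(demo.product)  # [(3+0j), (10+0j), (8+0j)] up to rounding
    print(demo)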
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
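    # Illustrative usage (function name as fixed above): pass the unknown
    # quantity as 0 and the function solves for it.
    print(ohms_law(voltage=10, current=0, resistance=5))   # {'current': 2.0}
    print(ohms_law(voltage=0, current=1.5, resistance=2))  # {'voltage': 3.0}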
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")

    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    '''simple docstring'''

    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = 'gelu'

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
@slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
import argparse
import copy
def lowerCAmelCase__ ( _a : List[Any] ):
snake_case_ : List[Any] = {}
with open(_a ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case_ : int = []
_list.append([line.split()[1], line.split()[2]] )
snake_case_ : Dict = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case_ : Dict = []
_list.append([line.split()[0], line.split()[2]] )
snake_case_ : int = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate all 2-node-swap neighbours of `solution`, each with its total distance appended."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list capped at `size` entries."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
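# Example invocation (hypothetical file name; the data format is the edge list
# sketched above after generate_neighbours):
#     python tabu_search.py -f tsp_data.txt -i 100 -s 5
# which runs 100 iterations with a tabu list of at most 5 recent swaps.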
| 36
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of GIT."""
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for the GIT model (text decoder plus vision tower)."""
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
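# Minimal usage sketch (relies only on the defaults defined above): build a text
# config around a customized vision tower and round-trip it through to_dict().
#     >>> vision_config = GitVisionConfig(image_size=384, patch_size=16)
#     >>> config = GitConfig(vision_config=vision_config.to_dict())
#     >>> config.vision_config.image_size
#     384
#     >>> config.to_dict()["model_type"]
#     'git'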
| 11
|
'''simple docstring'''
def get_set_bits_count(number: int) -> int:
    '''
    Count the number of set bits (1s) in a non-negative integer
    using Brian Kernighan's algorithm.

    >>> get_set_bits_count(25)  # 0b11001
    3
    >>> get_set_bits_count(0)
    0
    '''
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22
| 0
|
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive `size`-length tuples from `seq`."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """Uppercase the input, strip non-letters, and pad/split repeated letters with 'X'."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair key square (as a flat list) for the given key."""
    # I and J share a cell, so the alphabet has no "J"
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
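# Round-trip sketch (hypothetical key and message, chosen only for illustration).
# Decoding returns prepare_input's normalized form, so any padding "X"
# characters inserted to make the length even remain:
#     >>> ciphertext = encode("Hide the gold", "marvin")
#     >>> decode(ciphertext, "marvin")
#     'HIDETHEGOLDX'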
| 371
|
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Simulate a quantum half adder; returns measurement counts for (carry, sum)."""
    simulator = qiskit.Aer.get_backend("""aer_simulator""")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
UpperCAmelCase_ = half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
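# Expected behaviour, for reference: the two classical bits read back as
# "<carry><sum>", so on an ideal simulator half_adder(1, 1) yields {'10': 1000}
# (sum 0, carry 1), half_adder(1, 0) yields {'01': 1000}, and
# half_adder(0, 0) yields {'00': 1000}.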
| 29
| 0
|
"""simple docstring"""
def binary_and(a: int, b: int) -> str:
    """
    Return the bitwise AND of two non-negative integers as a binary string.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109
|
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
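# Usage sketch (hypothetical file names): this builder is what backs
# `load_dataset("json", ...)`, e.g.
#     >>> from datasets import load_dataset
#     >>> ds = load_dataset("json", data_files="data.jsonl")               # one object per line
#     >>> ds = load_dataset("json", data_files="data.json", field="rows")  # nested records
# The `field` argument selects the key holding the records, matching the
# `self.config.field` branch above.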
| 109
| 1
|
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
def convert_state_dict(orig_state_dict, model):
    # the destination key names for the split qkv weights are reconstructed from
    # the upstream conversion script; the obfuscated source dropped them
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()
    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # verify results on scanned document
    dataset = load_dataset('''hf-internal-testing/example-documents''')
    image = dataset['''test'''][0]['''image'''].convert('''RGB''')
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors='''pt''').pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        question = '''When is the coffee break?'''
        task_prompt = task_prompt.replace('''{user_input}''', question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = '''<s_rvlcdip>'''
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = '''<s_cord>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = '''s_cord-v2>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = '''<s_zhtrainticket>'''
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = '''hello world'''
    else:
        raise ValueError('''Model name not supported''')
    prompt_tensor = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors='''pt''')[
        '''input_ids'''
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1E-3)
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1E-2)
    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensor, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensor).logits
    assert torch.allclose(original_logits, logits, atol=1E-3)
    print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub('''nielsr/''' + model_name.split('''/''')[-1], commit_message='''Update model''')
        processor.push_to_hub('''nielsr/''' + model_name.split('''/''')[-1], commit_message='''Update model''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
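# Example invocation, mirroring the argparse defaults above (the script file
# name is hypothetical; use whatever this file is saved as):
#     python convert_donut_to_pytorch.py \
#         --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#         --pytorch_dump_folder_path ./donut-docvqa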
| 358
|
"""simple docstring"""
def is_even(number: int) -> bool:
    """
    >>> is_even(4)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40
| 0
|
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot_small-90M''': 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
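# For example, get_pairs(("h", "e", "l", "l", "o</w>")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")} -- these pairs are what
# the BPE loop below ranks against self.bpe_ranks.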
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-90M tokenizer based on BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
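# Usage sketch (assumes local vocab.json / merges.txt files in the format
# expected above; the paths are hypothetical and the round trip holds only for
# text covered by the vocabulary):
#     >>> tok = BlenderbotSmallTokenizer("vocab.json", "merges.txt")
#     >>> ids = tok("sample text")["input_ids"]
#     >>> tok.convert_tokens_to_string(tok.convert_ids_to_tokens(ids))
#     'sample text'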
| 79
|
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    """Pipeline for image inpainting using the RePaint resampling schedule."""

    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
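# Usage sketch (checkpoint name and images are illustrative; a 256x256 DDPM
# checkpoint paired with a RePaintScheduler is the typical setup):
#     >>> from diffusers import RePaintPipeline, RePaintScheduler
#     >>> scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#     >>> pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#     >>> out = pipe(image=original_image, mask_image=mask, num_inference_steps=250)
#     >>> out.images[0].save("inpainted.png")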
| 151
| 0
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
extra_arch = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
    raise Exception('requires fairseq >= 0.9.0')

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = ' Hello world! cécé herlolip'

mnli_rename_keys = [
    ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
    ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
    ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
    ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='cpu')
    hub_interface = torch.hub.load('pytorch/fairseq', 'bart.large.cnn').eval()
    hub_interface.model.load_state_dict(sd['model'])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load('pytorch/fairseq', checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace('.', '-')
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors='pt').unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}""")
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict['model.shared.weight'] = state_dict['model.decoder.embed_tokens.weight']
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict('mnli', tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, 'lm_head'):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""")
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
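# Example invocation (script name hypothetical):
#     python convert_bart_checkpoint.py bart.large.cnn ./bart-large-cnn --hf_config facebook/bart-large-cnn
# This downloads bart.large.cnn via torch.hub, converts the weights, and saves
# the HuggingFace checkpoint to ./bart-large-cnn.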
| 119
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_values', 'attention_mask']

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16_000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7_600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm='slaney', mel_scale='slaney', )
        if frame_signal_scale != 1.0:
            warnings.warn(
                'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers', FutureWarning, )
        if reduction_factor != 2.0:
            warnings.warn(
                'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers', FutureWarning, )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel='log10', )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError('You must provide either `audio` or `audio_target` values.')
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
            if inputs is None:
                return inputs_target
            else:
                inputs['labels'] = inputs_target['input_values']
                decoder_attention_mask = inputs_target.get('attention_mask')
                if decoder_attention_mask is not None:
                    inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({'input_values': features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({'input_values': speech})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs['input_values']
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs['input_values'] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs['input_values'] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs['input_values'] = input_values.astype(np.float32)
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_values'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_values'], attention_mask=attention_mask, padding_value=self.padding_value)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self):
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
        for name in names:
            if name in output:
                del output[name]
        return output
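# Usage sketch (synthetic waveform; a real 16 kHz recording would be used in practice):
#     >>> import numpy as np
#     >>> extractor = SpeechT5FeatureExtractor()
#     >>> wav = np.zeros(16000, dtype=np.float32)  # 1 second of silence
#     >>> inputs = extractor(audio=wav, sampling_rate=16000, return_tensors="np")
#     >>> targets = extractor(audio_target=wav, sampling_rate=16000, return_tensors="np")
#     >>> targets["input_values"].shape[-1] == extractor.num_mel_bins
#     True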
| 119
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_time_series_transformer''': [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TimeSeriesTransformerConfig''',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_time_series_transformer'''] = [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimeSeriesTransformerForPrediction''',
        '''TimeSeriesTransformerModel''',
        '''TimeSeriesTransformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72
|
'''simple docstring'''
import os
SYMBOLS = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to an integer (subtractive notation supported)."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the shortest (canonical) Roman numeral for an integer."""
    numerals = ''
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def a_ ( lowerCamelCase : str = "/p089_roman.txt" ):
lowerCAmelCase = 0
with open(os.path.dirname(lowerCamelCase ) + roman_numerals_filename ) as filea:
lowerCAmelCase = filea.readlines()
for line in lines:
lowerCAmelCase = line.strip()
lowerCAmelCase = parse_roman_numerals(lowerCamelCase )
lowerCAmelCase = generate_roman_numerals(lowerCamelCase )
savings += len(lowerCamelCase ) - len(lowerCamelCase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
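# Worked example of the two helpers above:
#     >>> parse_roman_numerals("XIV")
#     14
#     >>> generate_roman_numerals(14)
#     'XIV'
#     >>> parse_roman_numerals("IIII"), generate_roman_numerals(4)
#     (4, 'IV')   # rewriting "IIII" as "IV" saves two characters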
| 4
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=['polics', 'health'])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        # No kwarg
        outputs = classifier('Who are you voting for in 2020?', ['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics, public health')
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)
        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health'])
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)
        outputs = classifier(
            'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='This text is about {}')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'], ['positive', 'negative'])
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(1)
            ], )
        outputs = classifier(['I am happy', 'I am sad'], ['positive', 'negative'])
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(2)
            ], )
        with self.assertRaises(ValueError):
            classifier('', candidate_labels='politics')
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels='politics')
        with self.assertRaises(ValueError):
            classifier('Who are you voting for in 2020?', candidate_labels='')
        with self.assertRaises(TypeError):
            classifier('Who are you voting for in 2020?', candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='Not formatting template', )
        with self.assertRaises(AttributeError):
            classifier(
                'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template=None, )
        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        zero_shot_classifier.model.config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
} , )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
self.assertEqual(
            nested_simplify(outputs),
            {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
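# Illustrative usage of the pipeline under test (the checkpoint name below is an
# example chosen for this note, not mandated by the tests):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     classifier("Who are you voting for in 2020?", candidate_labels=["politics", "sports"])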
| 271
|
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size: int) -> bool:
    """Check whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 271
| 1
|
'''simple docstring'''
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
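# This pin table is typically consumed by the package's setup and dependency
# version checks; e.g. (illustrative):
#     deps["torch"]  # -> "torch>=1.4"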
| 93
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
MAX_MODEL_INPUT_SIZES = {
"facebook/s2t-small-librispeech-asr": 1024,
}
_UpperCAmelCase : List[str] = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
_UpperCAmelCase : Tuple = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang,
            lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the target-language token."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (sub-word strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
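# Illustrative usage (the checkpoint name comes from the constants above; output
# shown is indicative only):
#
#     tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#     tokenizer("hello world").input_ids  # token ids ending with eos_token_id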
| 236
| 0
|
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
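# Example invocation (illustrative placeholder paths and script name):
#     python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#         --data_dir ./cnn_dm --save_path ./cnn_dm_packed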
| 357
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """
    Custom SentencePiece Unigram tokenizer with NMT, NFKC, extra-space and lower-casing normalization.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)
    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the tokenizer model on one or more text files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()
    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the tokenizer model from an in-memory iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()
    def add_unk_id(self):
        """Patch the serialized model so unknown pieces map to the unk token id."""
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
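# Illustrative usage sketch (the tiny corpus below is a made-up example):
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train_from_iterator(["hello world", "hello tokenizers"], vocab_size=100)
#     tokenizer.encode("hello world").tokens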
| 61
| 0
|
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )
        expected_en = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
assert generated == expected_en
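# To run just these tests (illustrative path; the exact location depends on the
# repository layout):
#     python -m pytest tests/models/m2m_100/test_modeling_m2m_100.py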
| 150
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
def snake_case_ ( self , a=None , a=None , a="" , **a):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
lowercase__ : List[str] = tokenizer_kwargs
lowercase__ : Dict = {}
if hasattr(self.model.config , 'return_all_scores') and return_all_scores is None:
lowercase__ : Union[str, Any] = self.model.config.return_all_scores
if isinstance(a , a) or top_k is None:
lowercase__ : Tuple = top_k
lowercase__ : Any = False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , a , )
if return_all_scores:
lowercase__ : Union[str, Any] = None
else:
lowercase__ : Union[str, Any] = 1
if isinstance(a , a):
lowercase__ : Tuple = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
lowercase__ : List[str] = function_to_apply
return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
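# Illustrative usage (the model name below is an example, not mandated by this
# module):
#
#     from transformers import pipeline
#     classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#     classifier("I love this movie!", top_k=None)  # returns scores for every label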
| 214
| 0
|
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density function at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
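# Illustrative check (an addition, not in the original file): the standard normal
# density peaks at x = 0 with value 1 / sqrt(2 * pi) ≈ 0.3989.
assert abs(gaussian(0) - 1 / sqrt(2 * pi)) < 1e-12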
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__UpperCamelCase : str = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__UpperCamelCase : Optional[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__UpperCamelCase : Dict = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting characters in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two parent strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate children for the new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a perfect match for `target` is found."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
__UpperCamelCase : Tuple = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__UpperCamelCase : str = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 309
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
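# Illustrative usage of the exported pipeline (the checkpoint name below is an
# example):
#
#     from diffusers import UnCLIPPipeline
#     pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#     image = pipe("a photo of a corgi").images[0]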
| 340
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
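
# Usage sketch: composing the config from separate encoder/decoder configs via
# `from_encoder_decoder_configs` (illustrative sub-configs):
#
#     from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig
#     config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
#         ViTConfig(), BertConfig()
#     )
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention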
| 36
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( __snake_case , unittest.TestCase ):
A__ : Any = DiTPipeline
A__ : List[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A__ : Any = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
A__ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A__ : List[str] = False
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
torch.manual_seed(0 )
        __a = Transformer2DModel(
sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=UpperCAmelCase , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_0_0_0 , norm_type='ada_norm_zero' , norm_elementwise_affine=UpperCAmelCase , )
__a = AutoencoderKL()
__a = DDIMScheduler()
__a = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase=0 ) -> Tuple:
if str(UpperCAmelCase ).startswith('mps' ):
__a = torch.manual_seed(UpperCAmelCase )
else:
__a = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__a = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = 'cpu'
__a = self.get_dummy_components()
__a = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__a = self.get_dummy_inputs(UpperCAmelCase )
__a = pipe(**UpperCAmelCase ).images
__a = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
__a = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
__a = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(relax_max_difference=UpperCAmelCase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = torch.manual_seed(0 )
__a = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__a = ['vase', 'umbrella', 'white shark', 'white wolf']
__a = pipe.get_label_ids(UpperCAmelCase )
__a = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=4_0 , output_type='np' ).images
for word, image in zip(UpperCAmelCase , UpperCAmelCase ):
__a = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__a = ['vase', 'umbrella']
__a = pipe.get_label_ids(UpperCAmelCase )
__a = torch.manual_seed(0 )
__a = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2_5 , output_type='np' ).images
for word, image in zip(UpperCAmelCase , UpperCAmelCase ):
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
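
# Standalone usage sketch mirroring the slow test above (the model id is real;
# treat the rest as an illustrative recipe, not part of this test suite):
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
#     class_ids = pipe.get_label_ids(["white shark"])
#     image = pipe(class_ids, num_inference_steps=25, output_type="np").images[0]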
| 197
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    """Disable gradient tracking for every parameter of the given module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available torch device, warning about known MPS issues."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display an image with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current time as an HH:MM:SS string."""
    current_time = datetime.now()
    return current_time.strftime("%H:%M:%S")
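

if __name__ == "__main__":
    # Quick smoke test of the helpers above (assumes a CPU-only run is fine).
    print(f"[{get_timestamp()}] selected device: {get_device()}")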
| 197
| 1
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowercase = '''true'''
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=82 , SCREAMING_SNAKE_CASE=16 ):
'''simple docstring'''
set_seed(42 )
__UpperCamelCase :List[str] = RegressionModel()
__UpperCamelCase :List[Any] = deepcopy(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = DataLoader(SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
model.to(accelerator.device )
__UpperCamelCase , __UpperCamelCase :Optional[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return model, ddp_model, dataloader
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__UpperCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__UpperCamelCase :Optional[int] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE )
return outputs
with accelerator.main_process_first():
__UpperCamelCase :Any = dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
__UpperCamelCase :Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE ):
if use_longest:
return tokenizer.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=16 )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :str = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
__UpperCamelCase :int = get_dataloader(SCREAMING_SNAKE_CASE , not dispatch_batches )
__UpperCamelCase :List[str] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=SCREAMING_SNAKE_CASE )
__UpperCamelCase , __UpperCamelCase :List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[str] = []
for batch in dataloader:
__UpperCamelCase , __UpperCamelCase :Tuple = batch.values()
with torch.no_grad():
__UpperCamelCase :Any = model(SCREAMING_SNAKE_CASE )
__UpperCamelCase , __UpperCamelCase :Any = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__UpperCamelCase , __UpperCamelCase :List[Any] = [], []
for logit, targ in logits_and_targets:
logits.append(SCREAMING_SNAKE_CASE )
targs.append(SCREAMING_SNAKE_CASE )
__UpperCamelCase , __UpperCamelCase :str = torch.cat(SCREAMING_SNAKE_CASE ), torch.cat(SCREAMING_SNAKE_CASE )
return logits, targs
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=82 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=16 ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Tuple = get_basic_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCamelCase , __UpperCamelCase :List[Any] = generate_predictions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert (
len(SCREAMING_SNAKE_CASE ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE )}"""
def lowerCamelCase ( SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
__UpperCamelCase :Dict = evaluate.load('''glue''' , '''mrpc''' )
__UpperCamelCase , __UpperCamelCase :Dict = get_mrpc_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# First do baseline
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[int] = setup['''no''']
model.to(SCREAMING_SNAKE_CASE )
model.eval()
for batch in dataloader:
batch.to(SCREAMING_SNAKE_CASE )
with torch.inference_mode():
__UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[str] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE , references=batch['''labels'''] )
__UpperCamelCase :Tuple = metric.compute()
# Then do distributed
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE )
__UpperCamelCase :Union[str, Any] = outputs.logits.argmax(dim=-1 )
__UpperCamelCase :Optional[Any] = batch['''labels''']
__UpperCamelCase , __UpperCamelCase :Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :str = Accelerator(split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__UpperCamelCase :Union[str, Any] = Accelerator(split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(SCREAMING_SNAKE_CASE , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
__UpperCamelCase :Union[str, Any] = Accelerator()
test_torch_metrics(SCREAMING_SNAKE_CASE , 512 )
accelerator.state._reset_state()
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
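
# Typical invocation (a sketch; the filename is an assumption): run this
# script under `accelerate launch <this_file>.py` so several processes
# exercise `gather_for_metrics` against the single-process baseline.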
| 43
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the filesystem protocol (e.g. "s3://") from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if `fs` is anything other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    """Rename a path, moving locally instead of copy+delete when possible."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop references so forked processes start clean."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
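

# A minimal usage sketch of the helpers above (hypothetical paths):
#
#     import fsspec
#     assert extract_path_from_uri("s3://bucket/data") == "bucket/data"
#     assert not is_remote_filesystem(fsspec.filesystem("file"))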
| 29
| 0
|
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
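
# Worked check: the first Fibonacci term with 3 digits is F(12) = 144, so
# `solution(3)` returns 12.
#
#     assert solution(3) == 12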
| 355
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 286
| 0
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments plus the wrapped training script's."""
    parser = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=_lowerCamelCase , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
# rest from the training program
parser.add_argument("training_script_args" , nargs=_lowerCamelCase )
return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
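
# Example invocation (hypothetical script name), in the spirit of
# `torch.distributed.launch`:
#
#     python xla_spawn.py --num_cores 8 my_training_script.py --seed 42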
| 36
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_A : str = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
def __init__( self : Dict , *A : Any , **A : List[Any] ) ->None:
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 142
| 0
|
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 262
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( a , a , unittest.TestCase ):
__A = IFInpaintingSuperResolutionPipeline
__A = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__A = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
__A = PipelineTesterMixin.required_optional_params - {"""latents"""}
def _snake_case ( self ):
return self._get_superresolution_dummy_components()
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ):
if str(UpperCAmelCase_ ).startswith("""mps""" ):
lowerCamelCase =torch.manual_seed(UpperCAmelCase_ )
else:
lowerCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCamelCase =floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCamelCase =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCamelCase =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCamelCase ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _snake_case ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _snake_case ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
def _snake_case ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _snake_case ( self ):
self._test_save_load_local()
def _snake_case ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 262
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    """
    Construct an MBART tokenizer based on SentencePiece. Source text is encoded as
    `[input_ids] + [eos, src_lang_code]`, targets as `[input_ids] + [eos, tgt_lang_code]`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
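
# Usage sketch (the model id is real; the sentence is illustrative): the
# language codes above drive the [eos, lang_code] suffix tokens.
#
#     tok = MBartTokenizer.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tok("UN Chief Says There Is No Plan", return_tensors="pt")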
| 97
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A__ = datasets.logging.get_logger(__name__)
A__ = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
A__ = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
A__ = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase="dummy_doc" ) -> int:
"""simple docstring"""
snake_case__ : Dict = {doc: key_lines}
snake_case__ : Any = {doc: sys_lines}
snake_case__ : Dict = {}
snake_case__ : List[str] = 0
snake_case__ : Optional[Any] = 0
snake_case__ : Optional[Any] = 0
snake_case__ : Dict = 0
snake_case__ : List[Any] = 0
snake_case__ : List[Any] = 0
snake_case__ , snake_case__ : Tuple = reader.get_doc_mentions(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
snake_case__ : str = reader.set_annotated_parse_trees(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase , __lowerCAmelCase )
snake_case__ , snake_case__ : int = reader.get_doc_mentions(__lowerCAmelCase , sys_doc_lines[doc] , __lowerCAmelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
snake_case__ : Union[str, Any] = reader.set_annotated_parse_trees(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase , __lowerCAmelCase )
if remove_nested:
snake_case__ , snake_case__ : Dict = reader.remove_nested_coref_mentions(__lowerCAmelCase , __lowerCAmelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
snake_case__ , snake_case__ : Optional[int] = reader.remove_nested_coref_mentions(__lowerCAmelCase , __lowerCAmelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
snake_case__ : Any = reader.get_mention_assignments(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ : Optional[int] = reader.get_mention_assignments(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ : List[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'''files, respectively''' )
return doc_coref_infos
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
"""simple docstring"""
snake_case__ : Optional[Any] = get_coref_infos(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
snake_case__ : str = {}
snake_case__ : Optional[int] = 0
snake_case__ : List[Any] = 0
for name, metric in metrics:
snake_case__ , snake_case__ , snake_case__ : Any = evaluator.evaluate_documents(__lowerCAmelCase , __lowerCAmelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 100:.2f}""" , f""" Precision: {precision * 100:.2f}""" , f""" F1: {fa * 100:.2f}""" , )
if conll_subparts_num == 3:
snake_case__ : int = (conll / 3) * 100
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def _lowerCAmelCase ( __lowerCAmelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : str = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
snake_case__ : List[Any] = line.split()[5]
if not parse_col == "-":
snake_case__ : Optional[Any] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def __lowerCamelCase ( self :Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) ,codebase_urls=['''https://github.com/ns-moosavi/coval'''] ,reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] ,)
def __lowerCamelCase ( self :Any ,__lowercase :List[Any] ,__lowercase :int ,__lowercase :str=True ,__lowercase :Optional[int]=False ,__lowercase :Optional[Any]=False ,__lowercase :Tuple=False ):
snake_case__ : Optional[Any] = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
snake_case__ : Optional[int] = util.check_gold_parse_annotation(__lowercase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
snake_case__ : Any = evaluate(
key_lines=__lowercase ,sys_lines=__lowercase ,metrics=__lowercase ,NP_only=__lowercase ,remove_nested=__lowercase ,keep_singletons=__lowercase ,min_span=__lowercase ,)
return score
| 230
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a :Optional[int] = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :int = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :str = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 371
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__a :str = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the roberta_prelayernorm checkpoint into the transformers packaging format."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
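
# Example invocation (the script filename is an assumption; the checkpoint id
# comes from the help text above):
#
#     python convert_roberta_prelayernorm_checkpoint.py \
#         --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#         --pytorch_dump_folder_path ./converted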
| 329
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""PerceiverFeatureExtractor"""]
lowerCamelCase__ = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 86
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __lowerCAmelCase :
def __init__( self: List[str] , _lowerCAmelCase: Dict , _lowerCAmelCase: List[str]=13 , _lowerCAmelCase: Dict=7 , _lowerCAmelCase: Tuple=True , _lowerCAmelCase: Dict=True , _lowerCAmelCase: Optional[Any]=True , _lowerCAmelCase: Union[str, Any]=True , _lowerCAmelCase: str=99 , _lowerCAmelCase: Dict=32 , _lowerCAmelCase: Dict=5 , _lowerCAmelCase: int=4 , _lowerCAmelCase: Optional[Any]=4 , _lowerCAmelCase: Any="gelu" , _lowerCAmelCase: Any=0.0 , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: Optional[int]=True , _lowerCAmelCase: Any=5_12 , _lowerCAmelCase: Optional[Any]=16 , _lowerCAmelCase: str=2 , _lowerCAmelCase: Tuple=0.02 , _lowerCAmelCase: str=3 , _lowerCAmelCase: List[str]=4 , _lowerCAmelCase: Optional[Any]=None , ):
lowercase :List[str] = parent
lowercase :List[str] = batch_size
lowercase :List[str] = seq_length
lowercase :Dict = is_training
lowercase :Union[str, Any] = use_input_mask
lowercase :Optional[int] = use_token_type_ids
lowercase :Dict = use_labels
lowercase :Any = vocab_size
lowercase :List[Any] = hidden_size
lowercase :Optional[int] = num_hidden_layers
lowercase :Union[str, Any] = num_attention_heads
lowercase :Any = intermediate_multiple_size
lowercase :str = hidden_act
lowercase :List[Any] = hidden_dropout
lowercase :Optional[Any] = attention_dropout
lowercase :int = weight_tying
lowercase :Tuple = max_position_embeddings
lowercase :str = type_vocab_size
lowercase :Union[str, Any] = type_sequence_label_size
lowercase :Dict = initializer_range
lowercase :Dict = num_labels
lowercase :Dict = num_choices
lowercase :Any = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 236
| 0
|
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name) -> SwinConfig:
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
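# Illustrative check (not part of the original script): the helper keys off the model
# name, so e.g. get_swin_config("swin-base-simmim-window6-192") returns a config with
# window_size=6, embed_dim=128 and depths=(2, 2, 18, 2).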
def rename_key(name) -> str:
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
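# Example of the renaming above:
#   rename_key("encoder.patch_embed.proj.weight")
#   -> "swin.embeddings.patch_embeddings.projection.weight"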
def convert_state_dict(orig_state_dict, model) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
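# Note on the qkv split above: the original checkpoint stores query/key/value as one
# fused tensor of shape (3 * dim, hidden) for weights (or (3 * dim,) for biases), so
# val[:dim], val[dim : dim * 2] and val[-dim:] recover the three separate projections.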
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.keys())
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 364
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 124
| 0
|
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.",
    )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How much images to generate.",
    )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.",
    )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    w, h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(
    pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
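# Layout sketch: with num_images_per_prompt=4 the grid is int(sqrt(4)) = 2 rows by
# 4 // 2 = 2 columns, i.e. the four generated images are pasted into a 2x2 grid.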
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 309
|
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code: str, indent_level: str = "", start_prompt: str = None, end_prompt: str = None) -> list:
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
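# Behavior sketch: splitting "a = 1\nif x:\n    b = 2\nc = 3" at the top level yields
# ["a = 1", "if x:\n    b = 2\nc = 3"] -- a line that returns to the block's indent
# level right after deeper lines is treated as closing that block and kept with it.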
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects: list, key=None) -> list:
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
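# For example, sort_objects(["b_fn", "CONST_B", "AClass", "a_fn", "CONST_A"]) returns
# ["CONST_A", "CONST_B", "AClass", "a_fn", "b_fn"]: constants, then classes, then functions.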
def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
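# For example, the one-line statement
#   _import_structure["models.bert"].extend(["BertModel", "BertConfig", "BERT_CONSTANT"])
# is rewritten with the constant first: ["BERT_CONSTANT", "BertConfig", "BertModel"].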
def sort_imports(file: str, check_only: bool = True):
    with open(file, "r") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only: bool = True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures += [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
UpperCamelCase_ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 309
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
a : Tuple = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_factor=1 / 255, do_center_crop=True, crop_size=None, do_flip_channel_order=True, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image, size, resample=PIL.Image.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here is the imported transform, not the method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image, data_format=None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self, images, do_resize=None, size=None, resample=None, do_rescale=None, rescale_factor=None, do_center_crop=None, crop_size=None, do_flip_channel_order=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 369
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def __snake_case (self ) -> Dict:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def __snake_case (self ) -> Optional[int]:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def __snake_case (self ) -> List[str]:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def __snake_case (self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def __snake_case (self ) -> List[str]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __snake_case (self ) -> Dict:
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
| 82
| 0
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 158
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords):
        self.adlist = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state, char) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
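    # Illustrative usage of the automaton above:
    automaton = Automaton(["what", "hat", "ver", "er"])
    print(automaton.search_in("whatever, err ... , wherever"))
    # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}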
| 158
| 1
|
import functools
from typing import Any
def word_break(string: str, words: list) -> bool:
    '''simple docstring'''
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError('the string should be not empty string')
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError('the words should be a list of non-empty strings')
    # Build trie
    trie: dict = {}
    word_keeper_key = 'WORD_KEEPER'
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)
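# Illustrative examples of the function above:
#   word_break("applepenapple", ["apple", "pen"])  -> True  ("apple" + "pen" + "apple")
#   word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])  -> False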
if __name__ == "__main__":
import doctest
doctest.testmod()
| 110
|
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '/today').json()


def random_quotes() -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '/random').json()


if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 110
| 1
|
'''simple docstring'''
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]
def roman_to_int(roman: str) -> int:
    """simple docstring"""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """simple docstring"""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
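    # Round-trip sanity check for the two converters defined above:
    for num in (14, 1994, 2023):
        assert roman_to_int(int_to_roman(num)) == num
    print(int_to_roman(1994))  # MCMXCIV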
| 93
|
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        """simple docstring"""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self):
        """simple docstring"""
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self):
        """simple docstring"""
        self.assertRaisesRegex(
            IndexError, 'The length of profit and weight must be same.')
if __name__ == "__main__":
unittest.main()
| 93
| 1
|
import requests
SCREAMING_SNAKE_CASE_ = """YOUR API KEY"""
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = giphy_api_key ) -> list:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """+""".join(query.split() )
SCREAMING_SNAKE_CASE = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
SCREAMING_SNAKE_CASE = requests.get(_SCREAMING_SNAKE_CASE ).json()["""data"""]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 193
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 193
| 1
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 50
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/vivit-b-16x2-kinetics400""": (
        """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''vivit'''

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        '''simple docstring'''
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
| 210
| 0
|
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, '''html.parser''')
    div = soup.find('''div''', attrs={'''class''': '''gs_ri'''})
    anchors = div.find('''div''', attrs={'''class''': '''gs_fl'''}).find_all('''a''')
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 93
|
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 93
| 1
|
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        '''simple docstring'''
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self) -> None:
        '''simple docstring'''
        token = '''<pad>'''
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self) -> None:
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '''<s>''')
        self.assertEqual(vocab_keys[1], '''<pad>''')
        self.assertEqual(vocab_keys[-1], '''<mask>''')
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self) -> None:
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self) -> None:
        '''simple docstring'''
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors='''pt''')
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self) -> None:
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = '''I was born in 92000, and this is falsé.'''

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self) -> None:
        '''simple docstring'''
        # fmt: off
_UpperCamelCase = {'''input_ids''': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCamelCase,
            model_name='''moussaKam/mbarthez''',
            revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''',
            sequences=sequences,
        )
| 324
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        '''simple docstring'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self) -> None:
        '''simple docstring'''
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self) -> None:
        '''simple docstring'''
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    """simple docstring"""

    checkpoint_name = 'facebook/mbart-large-en-ro'
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ''' Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    tgt_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''')
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        '''simple docstring'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        '''simple docstring'''
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        '''simple docstring'''
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        '''simple docstring'''
        src_text = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        '''simple docstring'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR''']), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        '''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        '''simple docstring'''
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='''pt''')
        batch["decoder_input_ids"] = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        '''simple docstring'''
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors='''pt''',
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        '''simple docstring'''
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='''pt''')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='''pt''')
        labels = targets['''input_ids''']
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        '''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''')

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                '''input_ids''': [[62, 3034, 2, 250004]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 250001,
            },
        )
| 324
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''.''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 1.0,
            '''sag_scale''': 1.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '''.'''
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='''np''')

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '''.'''
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='''np''')

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '''.'''
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='''np''', )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 370
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 173
| 0
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ('bert-base-cased', {}),
        ('gpt2', {'use_cache': False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, 'tf', 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, 'pt', 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t') as vocab_file:
            vocab_file.write('\n'.join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, 'pt', 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, 'tf', 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model')

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, 'pt', 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model')

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath('model.onnx')

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random'))
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random')
        self._test_infer_dynamic_axis(model, tokenizer, 'pt')

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random'))
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random')
        self._test_infer_dynamic_axis(model, tokenizer, 'tf')

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'})
        self.assertDictEqual(shapes['output_1'], {0: 'batch'})

    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens['input_ids'])
        self.assertEqual(ordered_input_names[0], 'input_ids')

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx'), '-test')
        self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix())
| 137
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = 'hf-internal-testing/tiny-random-bert'
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
FULL_COMMIT_HASH = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, 'refs', 'main')) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, 'snapshots', main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision='9b8c223')
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, 'snapshots', FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, 'is not a valid model identifier'):
            _ = cached_file('tiny-random-bert', CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, 'is not a valid git identifier'):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision='aaaa')

        with self.assertRaisesRegex(EnvironmentError, 'does not appear to have a file named'):
            _ = cached_file(RANDOM_BERT, 'conf')

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, 'does not appear to have a file named'):
            _ = cached_file(RANDOM_BERT, 'conf')

        with open(os.path.join(CACHE_DIR, 'refs', 'main')) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, '.no_exist', main_commit, 'conf')))

        path = cached_file(RANDOM_BERT, 'conf', _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, 'conf', local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, 'conf', _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only', WEIGHTS_NAME))
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only', TF2_WEIGHTS_NAME))
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only', FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo('bert-base-cased', 'ahah.txt'))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, 'is not a valid model identifier'):
            get_file_from_repo('bert-base-case', CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, 'is not a valid git identifier'):
            get_file_from_repo('bert-base-cased', CONFIG_NAME, revision='ahaha')

        resolved_file = get_file_from_repo('bert-base-cased', CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, 'r').read())
        self.assertEqual(config['hidden_size'], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / 'a.txt'
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, 'a.txt'), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, 'b.txt'))
| 137
| 1
|
"""simple docstring"""
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        '''simple docstring'''
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        '''simple docstring'''
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        '''simple docstring'''
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        '''simple docstring'''
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
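

# Usage sketch for the Matrix class above (grid values are illustrative):
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    print(Matrix(5, 5, grid).count_islands())  # 5 islands (8-directional connectivity)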
| 172
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

RESOURCE_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}

VOCAB_FILES_NAMES = {
    '''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
    '''vocab_file''': '''vocab.txt''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
    },
    '''sentencepiece_model_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''ernie-m-base''': 514,
    '''ernie-m-large''': 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    '''ernie-m-base''': {'''do_lower_case''': False},
    '''ernie-m-large''': {'''do_lower_case''': False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8",
                 unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
                 mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        '''simple docstring'''
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = '''''', []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize('''NFKC''', ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.vocab)

    def get_vocab(self):
        '''simple docstring'''
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        '''simple docstring'''
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        '''simple docstring'''
        if self.sp_model_kwargs.get('''enable_sampling''') is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('''alpha''') is not None:
            alpha = self.sp_model_kwargs.get('''alpha''')
        if self.sp_model_kwargs.get('''nbest_size''') is not None:
            nbest_size = self.sp_model_kwargs.get('''nbest_size''')

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE, ''' ''').strip()
        return out_string

    def convert_ids_to_string(self, ids):
        '''simple docstring'''
        tokens = self.convert_ids_to_tokens(ids)
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE, ''' ''').strip()
        return out_string

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        '''simple docstring'''
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        '''simple docstring'''
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        '''simple docstring'''
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        '''simple docstring'''
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        '''simple docstring'''
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        '''simple docstring'''
        token_to_idx = {}
        with io.open(filepath, '''r''', encoding='''utf-8''') as f:
            for index, line in enumerate(f):
                token = line.rstrip('''\n''')
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        with open(vocab_file, '''w''', encoding='''utf-8''') as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''')
                    index = token_index
                writer.write(token + '''\n''')
                index += 1
        tokenizer_model_file = os.path.join(save_directory, '''sentencepiece.bpe.model''')
        with open(tokenizer_model_file, '''wb''') as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
| 172
| 1
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)

        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)

        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 345
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subsplit(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 345
| 1
|
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Return the product of the digits of the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
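
# Illustrative sketch (added for clarity; not part of the original solution):
# str_eval is a plain digit product, so a couple of tiny checks pin down its
# behavior before solution() applies it to 13-digit windows of N.
def _str_eval_examples() -> None:
    assert str_eval("123") == 6  # 1 * 2 * 3
    assert str_eval("999") == 729  # 9 ** 3
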
if __name__ == "__main__":
print(f"""{solution() = }""")
| 353
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
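
# Illustrative sketch (added): after partitioning, every element to the left
# of the returned pivot index is strictly smaller than the pivot element, which
# is what makes the recursive calls above correct.
def _partition_invariant_example() -> None:
    data = [3, 1, 2, 5, 4]
    pivot_index, _ = _in_place_partition(data, 0, len(data) - 1)
    assert all(x < data[pivot_index] for x in data[:pivot_index])
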
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
| 343
| 0
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
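
# Illustrative note (added): TPU launchers such as examples/xla_spawn.py import
# this module and call _mp_fn(index) once per core, which is why all of the
# actual work has to live inside main().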
if __name__ == "__main__":
main()
| 4
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
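
# Example invocation (added for illustration; the paths below are placeholders):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-out \
#       --transfo_xl_dataset_file ./corpus-cached.pkl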
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 51
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """
    Print the first- and second-order Shannon entropies of the text,
    and the difference between them.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the text input into two dicts of counts: one for single
    characters and one for two-character sequences.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
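
# Illustrative sketch (added): the raw counts analyze_text produces for a tiny
# input, before calculate_prob turns them into entropies.
def _analyze_text_example() -> None:
    singles, pairs = analyze_text("abc abc")
    assert singles["a"] == 2  # 'a' occurs twice
    assert pairs["ab"] == 2  # the bigram 'ab' occurs twice
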
def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 310
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
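

# Illustrative usage sketch (added): design a 1 kHz low-pass biquad for a
# 48 kHz sample rate and push a few samples through it. This assumes the
# IIRFilter class imported above exposes a per-sample process() method.
def _lowpass_example():
    filt = make_lowpass(1000, 48000)
    return [filt.process(sample) for sample in (0.0, 1.0, 0.5, -0.5)]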
| 310
| 1
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
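
# Illustrative note (added): instantiating the deprecated class still works and
# simply forwards to OwlViTImageProcessor after emitting the FutureWarning
# above; new code should construct OwlViTImageProcessor directly.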
| 257
|
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid,
    source,
    destination,
    allow_diagonal,
) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
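
# Illustrative sketch (added): shortest path across a tiny 2x2 grid of
# walkable cells (value 1), from the top-left to the bottom-right corner.
def _dijkstra_example() -> None:
    grid = np.array([[1, 1], [1, 1]])
    dist, path = dijkstra(grid, (0, 0), (1, 1), allow_diagonal=False)
    assert dist == 2
    assert path[0] == (0, 0) and path[-1] == (1, 1)
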
if __name__ == "__main__":
import doctest
doctest.testmod()
| 257
| 1
|
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
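
# Illustrative note (added): 6 * key_len qubits are prepared, and on average
# half of Alice's and Bob's bases agree, so roughly 3 * key_len sifted bits
# survive -- comfortably more than the key_len bits that are kept. With a
# fixed seed the whole simulation is deterministic.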
if __name__ == "__main__":
print(f'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
| 371
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 210
| 0
|
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
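
# Illustrative note (added): DummyObject makes these placeholders raise an
# ImportError that names the missing "speech" extra as soon as they are
# instantiated or otherwise used, instead of failing at import time.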
| 6
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer backed by the `tokenizers` library, with a custom Jieba pre-tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom Jieba pre-tokenizer cannot be pickled; swap in a plain BERT one
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # the custom pre-tokenizer cannot be serialized; temporarily fall back to BERT's
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
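# A minimal usage sketch (not part of the original file): it assumes the
# `transformers` package is installed and uses one of the real junnyu
# checkpoints listed in the pretrained maps above.
#
#   from transformers import RoFormerTokenizerFast
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   print(tokenizer("今天天气非常好。")["input_ids"])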
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , _lowerCamelCase )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
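# Illustrative invocation (a sketch, not part of the original script; the task
# name and data directory are placeholders for your own setup):
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag \
#       --output_dir ./swag_out \
#       --do_train --do_eval \
#       --max_seq_length 128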
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
__lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
__lowerCAmelCase : Optional[Any] =parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
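# Example invocation (a sketch; the dump path is a placeholder, the timm model
# name is the script's real default):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base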
|
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    """TF mT5 model; identical to TFT5Model apart from being instantiated from an MT5Config."""

    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    """TF mT5 with an LM head; identical to TFT5ForConditionalGeneration apart from the config."""

    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    """Encoder-only TF mT5 variant."""

    model_type = "mt5"
    config_class = MT5Config
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a : List[str] = "src/diffusers"
a : str = "."
# This is to make sure the diffusers module imported is the one in the repo.
a : Tuple = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
a : List[str] = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
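# Typical usage (a sketch): run from the repo root to list inconsistencies, or
# pass --fix_and_overwrite to rewrite the out-of-sync copies in place:
#
#   python utils/check_copies.py
#   python utils/check_copies.py --fix_and_overwrite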
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            # w has shape (3,); minimize the mean squared error to the target
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Wraps a schedule's lr lambdas so the schedule stays picklable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
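# To run just this module (a sketch; the test file path is an assumption about
# where this file lives in the repo):
#
#   python -m pytest tests/optimization/test_optimization.py -k "adafactor or schedulers"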
|
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("unreachable: the queue only empties after a return")
def pre_order(node: TreeNode) -> None:
    """Root -> Left -> Right."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    """Left -> Root -> Right."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    """Left -> Right -> Root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    """Level-by-level traversal, printing a newline after each level."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # consume the current level
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def lowerCamelCase ( a_ = "" , a_=50 , a_="*" ) -> str:
if not s:
return "\n" + width * char
lowerCAmelCase_ , lowerCAmelCase_ = divmod(width - len(a_ ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 5_0 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
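# Worked example (illustrative, not part of the original script): for the tree
#
#         1
#        / \
#       2   3
#
# pre_order prints  1,2,3,   in_order prints  2,1,3,   post_order prints  2,3,1,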
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return set of symbol pairs in a word. Word is represented as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        # separate punctuation and collapse repeated whitespace before BPE
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
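# A minimal usage sketch (assumes the real facebook/blenderbot_small-90M
# checkpoint referenced in the maps above):
#
#   tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   ids = tokenizer("sam is a great name").input_ids
#   print(tokenizer.decode(ids))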
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
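# Note (explanatory, not extra runtime code): with the lazy structure above,
# `from transformers.models.poolformer import PoolFormerModel` defers the
# heavy torch import until the attribute is first accessed.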
|
"""simple docstring"""
def _snake_case ( _snake_case : int = 4000000 ):
lowerCAmelCase : Union[str, Any] = []
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_snake_case )
lowerCAmelCase, lowerCAmelCase : List[Any] = b, a + b
return sum(_snake_case )
if __name__ == "__main__":
print(f"""{solution() = }""")
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
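# Example invocation (a sketch using the script's own defaults):
#
#   python run_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt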
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another, since fulfilling
        # the shorter one would make "constraint fulfillment" ambiguous.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
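# Intuition (illustrative): with the nested token lists [[1, 2, 3], [1, 2, 4]],
# the constraint behaves like a trie; generating 1 then 2 keeps both branches
# alive, and emitting either 3 or 4 completes the constraint.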
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def __magic_name__ ( self : Any ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def __magic_name__ ( self : Dict ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __magic_name__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __magic_name__ ( self : Tuple ) -> Dict:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __magic_name__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __magic_name__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
def __magic_name__ ( self : List[Any] ) -> str:
'''simple docstring'''
def check_hidden_states_output(snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : List[Any] ):
A__ = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = self.model_tester.num_stages
self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def __magic_name__ ( self : List[Any] ) -> int:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(snake_case_ )
A__ = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
A__ = model_class(config=snake_case_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason="UperNet does not have tied weights" )
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
def __magic_name__ ( self : Any ) -> str:
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = UperNetForSemanticSegmentation.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> List[Any]:
'''simple docstring'''
A__ = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
A__ = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(snake_case_ )
A__ = prepare_img()
A__ = processor(images=snake_case_ , return_tensors="pt" ).to(snake_case_ )
with torch.no_grad():
A__ = model(**snake_case_ )
A__ = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , snake_case_ )
A__ = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case_ , atol=1e-4 ) )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
A__ = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(snake_case_ )
A__ = prepare_img()
A__ = processor(images=snake_case_ , return_tensors="pt" ).to(snake_case_ )
with torch.no_grad():
A__ = model(**snake_case_ )
A__ = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , snake_case_ )
A__ = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case_ , atol=1e-4 ) )
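# --- Illustrative usage sketch (not part of the test suite) ---
# The integration tests above stop at raw logits. A minimal sketch, reusing the
# "openmmlab/upernet-convnext-tiny" checkpoint tested above, of how those logits are
# typically collapsed into a per-pixel segmentation map; the argmax step is common
# practice rather than something this file prescribes.
if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, num_labels, 512, 512)
    # each pixel gets the index of its highest-scoring class
    seg_map = logits.argmax(dim=1)[0]
    print(seg_map.shape)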
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase :Union[str, Any] = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :Tuple = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :Any = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :Tuple = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase :Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
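# --- Illustrative sketch of the lazy-import idea (an assumption-level simplification,
# not the actual transformers `_LazyModule` implementation) ---
# The structure above maps public names to submodules so heavy backends (torch, tf) are
# only imported when a name is first accessed. A self-contained toy version:
import importlib
import types

class TinyLazyModule(types.ModuleType):
    """Resolves attributes to their defining submodule on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"modeling_deberta": ["DebertaModel", ...]} -> {"DebertaModel": "modeling_deberta"}
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value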
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__UpperCAmelCase :int = datasets.utils.logging.get_logger(__name__)
@dataclass
class a ( datasets.BuilderConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None
SCREAMING_SNAKE_CASE : str = "utf-8"
SCREAMING_SNAKE_CASE : Optional[str] = None
SCREAMING_SNAKE_CASE : Optional[str] = None
SCREAMING_SNAKE_CASE : bool = True # deprecated
SCREAMING_SNAKE_CASE : Optional[int] = None # deprecated
SCREAMING_SNAKE_CASE : int = 1_0 << 2_0 # 10MB
SCREAMING_SNAKE_CASE : Optional[bool] = None
class a ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = JsonConfig
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
__UpperCAmelCase : Tuple = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self : Dict , snake_case : Tuple ) -> str:
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
__UpperCAmelCase : Optional[int] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(snake_case , (str, list, tuple) ):
__UpperCAmelCase : Dict = data_files
if isinstance(snake_case , snake_case ):
__UpperCAmelCase : List[Any] = [files]
__UpperCAmelCase : Tuple = [dl_manager.iter_files(snake_case ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__UpperCAmelCase : Tuple = []
for split_name, files in data_files.items():
if isinstance(snake_case , snake_case ):
__UpperCAmelCase : Any = [files]
__UpperCAmelCase : Optional[int] = [dl_manager.iter_files(snake_case ) for file in files]
splits.append(datasets.SplitGenerator(name=snake_case , gen_kwargs={'''files''': files} ) )
return splits
def lowerCamelCase__ ( self : List[str] , snake_case : pa.Table ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
__UpperCAmelCase : Any = self.config.features.arrow_schema.field(snake_case ).type
__UpperCAmelCase : Dict = pa_table.append_column(snake_case , pa.array([None] * len(snake_case ) , type=snake_case ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
__UpperCAmelCase : Tuple = table_cast(snake_case , self.config.features.arrow_schema )
return pa_table
def lowerCamelCase__ ( self : Tuple , snake_case : Any ) -> Optional[int]:
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__UpperCAmelCase : Optional[int] = json.load(snake_case )
# We keep only the field we are interested in
__UpperCAmelCase : int = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(snake_case , (list, tuple) ):
__UpperCAmelCase : Optional[Any] = set().union(*[row.keys() for row in dataset] )
__UpperCAmelCase : Union[str, Any] = {col: [row.get(snake_case ) for row in dataset] for col in keys}
else:
__UpperCAmelCase : Optional[int] = dataset
__UpperCAmelCase : Tuple = pa.Table.from_pydict(snake_case )
yield file_idx, self._cast_table(snake_case )
# If the file has one json object per line
else:
with open(snake_case , '''rb''' ) as f:
__UpperCAmelCase : Optional[Any] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
__UpperCAmelCase : int = max(self.config.chunksize // 32 , 16 << 10 )
__UpperCAmelCase : Any = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
__UpperCAmelCase : List[Any] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(snake_case )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
__UpperCAmelCase : Union[str, Any] = batch.decode(self.config.encoding , errors=snake_case ).encode('''utf-8''' )
try:
while True:
try:
__UpperCAmelCase : List[str] = paj.read_json(
io.BytesIO(snake_case ) , read_options=paj.ReadOptions(block_size=snake_case ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(snake_case , pa.ArrowInvalid )
and "straddling" not in str(snake_case )
or block_size > len(snake_case )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(snake_case )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__UpperCAmelCase : Optional[Any] = json.load(snake_case )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(snake_case )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(snake_case , snake_case ): # list is the only sequence type supported in JSON
try:
__UpperCAmelCase : Dict = set().union(*[row.keys() for row in dataset] )
__UpperCAmelCase : Optional[Any] = {col: [row.get(snake_case ) for row in dataset] for col in keys}
__UpperCAmelCase : Union[str, Any] = pa.Table.from_pydict(snake_case )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'Failed to read file \'{file}\' with error {type(snake_case )}: {e}' )
raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(snake_case )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(snake_case )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(snake_case )
batch_idx += 1
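# --- Illustrative usage sketch (not part of the builder) ---
# A minimal sketch of the two input shapes `_generate_tables` handles above: JSON Lines
# (one object per line, read in `chunksize` byte blocks) and a single JSON object whose
# `field` key holds the records. The file names are placeholders, not real fixtures.
if __name__ == "__main__":
    # JSON Lines: {"a": 1}\n{"a": 2}\n...
    jsonl_ds = datasets.load_dataset("json", data_files="train.jsonl", split="train")
    # Single object: {"version": "1.0", "data": [{...}, {...}]} -> records live under "data"
    nested_ds = datasets.load_dataset("json", data_files="train.json", field="data", split="train")
    print(jsonl_ds, nested_ds)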
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__A = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list of floats."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=4_0_0 , __UpperCAmelCase=2_0_0_0 , __UpperCAmelCase=2_0_4_8 , __UpperCAmelCase=1_2_8 , __UpperCAmelCase=1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=3_0 , __UpperCAmelCase=4_4_1_0_0 , ):
'''simple docstring'''
lowerCAmelCase__ :Dict = parent
lowerCAmelCase__ :Dict = batch_size
lowerCAmelCase__ :Union[str, Any] = min_seq_length
lowerCAmelCase__ :Optional[Any] = max_seq_length
lowerCAmelCase__ :Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase__ :Dict = spectrogram_length
lowerCAmelCase__ :int = feature_size
lowerCAmelCase__ :int = num_audio_channels
lowerCAmelCase__ :int = hop_length
lowerCAmelCase__ :Optional[Any] = chunk_length
lowerCAmelCase__ :int = sampling_rate
def snake_case ( self ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def snake_case ( self , __UpperCAmelCase=False , __UpperCAmelCase=False ):
'''simple docstring'''
def _flatten(__UpperCAmelCase ):
return list(itertools.chain(*__UpperCAmelCase ) )
if equal_length:
lowerCAmelCase__ :Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase__ :List[str] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase__ :List[Any] = [np.asarray(__UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Tuple = TvltFeatureExtractor
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = TvltFeatureExtractionTester(self )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__UpperCAmelCase , 'spectrogram_length' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'feature_size' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'num_audio_channels' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'hop_length' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'chunk_length' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'sampling_rate' ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ :int = feat_extract_first.save_pretrained(__UpperCAmelCase )[0]
check_json_file_has_correct_format(__UpperCAmelCase )
lowerCAmelCase__ :str = self.feature_extraction_class.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = feat_extract_first.to_dict()
lowerCAmelCase__ :List[str] = feat_extract_second.to_dict()
lowerCAmelCase__ :int = dict_first.pop('mel_filters' )
lowerCAmelCase__ :Optional[Any] = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ :List[Any] = os.path.join(__UpperCAmelCase , 'feat_extract.json' )
feat_extract_first.to_json_file(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = self.feature_extraction_class.from_json_file(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = feat_extract_first.to_dict()
lowerCAmelCase__ :Union[str, Any] = feat_extract_second.to_dict()
lowerCAmelCase__ :Any = dict_first.pop('mel_filters' )
lowerCAmelCase__ :Optional[Any] = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase__ :List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase__ :Union[str, Any] = [np.asarray(__UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase__ :List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
lowerCAmelCase__ :List[Any] = feature_extractor(__UpperCAmelCase , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
lowerCAmelCase__ :str = feature_extractor(
__UpperCAmelCase , return_tensors='np' , sampling_rate=4_4_1_0_0 , mask_audio=__UpperCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
lowerCAmelCase__ :int = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase__ :str = np.asarray(__UpperCAmelCase )
lowerCAmelCase__ :int = feature_extractor(__UpperCAmelCase , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self, num_samples):
        '''simple docstring'''
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = self._load_datasamples(1 )
lowerCAmelCase__ :Union[str, Any] = TvltFeatureExtractor()
lowerCAmelCase__ :str = feature_extractor(__UpperCAmelCase , return_tensors='pt' ).audio_values
        self.assertEqual(audio_values.shape, (1, 1, 1_9_2, 1_2_8))
lowerCAmelCase__ :Tuple = torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __UpperCAmelCase , atol=1E-4 ) )
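# --- Illustrative usage sketch (not part of the test suite) ---
# A minimal sketch, feeding one second of synthetic 44.1 kHz "audio" through the default
# TvltFeatureExtractor; the random waveform is a stand-in for real speech, and the shape
# comment mirrors the assertions exercised in the tests above.
if __name__ == "__main__":
    waveform = np.asarray(floats_list((1, 44_100))[0])
    extractor = TvltFeatureExtractor()
    features = extractor(waveform, sampling_rate=44_100, return_tensors="np")
    # audio_values: (batch, num_audio_channels, time, feature_size) spectrogram features
    print(features.audio_values.shape)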
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Tuple = StableDiffusionXLImgaImgPipeline
__magic_name__ :List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__magic_name__ :Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
__magic_name__ :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__magic_name__ :str = IMAGE_TO_IMAGE_IMAGE_PARAMS
__magic_name__ :Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
lowerCAmelCase__ :str = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
lowerCAmelCase__ :str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
lowerCAmelCase__ :str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , )
lowerCAmelCase__ :int = CLIPTextModel(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase )
lowerCAmelCase__ :Any = CLIPTextModelWithProjection(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase )
lowerCAmelCase__ :str = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
lowerCAmelCase__ :Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = image / 2 + 0.5
if str(__UpperCAmelCase ).startswith('mps' ):
lowerCAmelCase__ :Optional[int] = torch.manual_seed(__UpperCAmelCase )
else:
lowerCAmelCase__ :Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
lowerCAmelCase__ :Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ :int = self.get_dummy_components()
lowerCAmelCase__ :List[str] = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
lowerCAmelCase__ :str = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :str = self.get_dummy_inputs(__UpperCAmelCase )
lowerCAmelCase__ :int = sd_pipe(**__UpperCAmelCase ).images
lowerCAmelCase__ :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCAmelCase__ :List[str] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def snake_case ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ):
'''simple docstring'''
lowerCAmelCase__ :Any = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
lowerCAmelCase__ :Dict = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 6_4, 6_4) )
lowerCAmelCase__ :Optional[int] = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
lowerCAmelCase__ :int = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = self.get_inputs(__UpperCAmelCase )
lowerCAmelCase__ :int = pipe(**__UpperCAmelCase ).images
lowerCAmelCase__ :Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase__ :List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
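# --- Illustrative usage sketch (not part of the test suite) ---
# A minimal sketch of the img2img flow the dummy-component tests above exercise: start
# from an init image, noise it up to `strength`, then denoise under the prompt. The
# refiner checkpoint name and the flat gray placeholder image are assumptions for
# illustration only.
if __name__ == "__main__":
    from PIL import Image
    pipe = StableDiffusionXLImgaImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
    init_image = Image.new("RGB", (1_024, 1_024), "gray")  # stand-in for a real photo
    result = pipe(
        prompt="a castle on a hill", image=init_image, strength=0.75,
        num_inference_steps=30, guidance_scale=5.0,
    )
    result.images[0].save("castle.png")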
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Dict = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase__ :
SCREAMING_SNAKE_CASE_ =42
# setable values
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =None
@classmethod
def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ):
'''simple docstring'''
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =42
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE_ =42
@property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = dtype
def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
UpperCAmelCase__ : Any = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ):
'''simple docstring'''
return sample
def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Optional[Any] = state.common.betas[t]
UpperCAmelCase__ : Any = (predicted_variance + 1) / 2
UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log
return variance
def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = timestep
if key is None:
UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : int = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 )
UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
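# --- Illustrative numeric sketch (not part of the scheduler) ---
# A minimal check, on an assumed toy beta schedule, of the posterior-mean coefficients
# from formula (7) of https://arxiv.org/pdf/2006.11239.pdf that `step` combines above:
#   mu_t = (sqrt(abar_{t-1}) * beta_t / (1 - abar_t)) * x_0
#        + (sqrt(alpha_t) * (1 - abar_{t-1}) / (1 - abar_t)) * x_t
if __name__ == "__main__":
    betas = jnp.linspace(1e-4, 2e-2, 10)  # toy schedule, not the configured one
    alphas = 1.0 - betas
    alphas_cumprod = jnp.cumprod(alphas)
    t = 5
    beta_prod_t = 1 - alphas_cumprod[t]
    coeff_x0 = (alphas_cumprod[t - 1] ** 0.5 * betas[t]) / beta_prod_t
    coeff_xt = alphas[t] ** 0.5 * (1 - alphas_cumprod[t - 1]) / beta_prod_t
    print(float(coeff_x0), float(coeff_xt))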
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """
    Prints the first- and second-order Shannon entropies of the text, and their difference.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Converts the input text into two dicts of counts: single-character and
    two-character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    import doctest
    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
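# --- Illustrative worked example (the input string "abca" is an assumption) ---
# analyze_text counts the final character once up front and then every position i < len-1,
# so for "abca" the single counts are {'a': 2, 'b': 1, 'c': 1} and the pair counts are
# {' a': 1, 'ab': 1, 'bc': 1, 'ca': 1}; the first-order entropy is
# -(0.5*log2(0.5) + 2 * 0.25*log2(0.25)) = 1.5 bits.
def _entropy_demo() -> None:
    singles, pairs = analyze_text("abca")
    assert singles == Counter({"a": 2, "b": 1, "c": 1})
    assert pairs == Counter({" a": 1, "ab": 1, "bc": 1, "ca": 1})
    calculate_prob("abca")  # prints rounded first-/second-order entropies and their difference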
if __name__ == "__main__":
main()
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __lowerCamelCase :
_lowercase = XGLMConfig
_lowercase = {}
_lowercase = """gelu"""
def __init__( self: Optional[int],A_: Dict,A_: Any=14,A_: Optional[int]=7,A_: str=True,A_: Any=True,A_: Optional[int]=True,A_: Optional[int]=99,A_: List[str]=32,A_: Any=2,A_: Tuple=4,A_: List[str]=37,A_: Dict="gelu",A_: int=0.1,A_: List[str]=0.1,A_: int=512,A_: List[Any]=0.0_2,):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = ffn_dim
__UpperCamelCase = activation_function
__UpperCamelCase = activation_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = 2
__UpperCamelCase = 1
def snake_case_ ( self: Dict ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length],self.vocab_size ),clip_value_min=0,clip_value_max=3 )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = self.get_config()
__UpperCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads],2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,num_layers=self.num_hidden_layers,attention_heads=self.num_attention_heads,ffn_dim=self.ffn_dim,activation_function=self.activation_function,activation_dropout=self.activation_dropout,attention_dropout=self.attention_dropout,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,use_cache=A_,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,return_dict=A_,)
def snake_case_ ( self: int ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class __lowerCamelCase (_a , _a , unittest.TestCase ):
_lowercase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowercase = (TFXGLMForCausalLM,) if is_tf_available() else ()
_lowercase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMModelTester(self )
__UpperCamelCase = ConfigTester(self,config_class=A_,n_embd=37 )
def snake_case_ ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def snake_case_ ( self: Any ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def snake_case_ ( self: Tuple ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __lowerCamelCase (unittest.TestCase ):
@slow
def snake_case_ ( self: Optional[Any],A_: int=True ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        __UpperCamelCase = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase = model.generate(A_,do_sample=A_,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(),A_ )
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase = tokenizer('Today is a nice day and',return_tensors='tf' )
__UpperCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase = model.generate(A_,do_sample=A_,seed=[7, 0] )
__UpperCamelCase = tokenizer.decode(output_ids[0],skip_special_tokens=A_ )
__UpperCamelCase = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_,A_ )
@slow
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase = 'left'
# use different length sentences to test batching
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase = tokenizer(A_,return_tensors='tf',padding=A_ )
__UpperCamelCase = inputs['input_ids']
__UpperCamelCase = model.generate(input_ids=A_,attention_mask=inputs['attention_mask'],max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[0],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer(sentences[1],return_tensors='tf' ).input_ids
__UpperCamelCase = model.generate(input_ids=A_,max_new_tokens=12 )
__UpperCamelCase = tokenizer.batch_decode(A_,skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_non_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = tokenizer.decode(output_padded[0],skip_special_tokens=A_ )
__UpperCamelCase = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_,A_ )
self.assertListEqual(A_,[non_padded_sentence, padded_sentence] )
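# --- Illustrative note on left padding (sketch, not part of the test suite) ---
# Decoder-only models continue from the last position of the input, so padding has to
# sit on the left; with right padding, generation would start from pad tokens. A minimal
# sketch of the tokenizer setup the batching test above depends on:
if __name__ == "__main__":
    tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    tok.padding_side = "left"
    batch = tok(["a much longer first example sentence", "short"], padding=True, return_tensors="tf")
    print(batch["input_ids"].numpy())  # pad ids precede the real tokens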
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowercase : Optional[Any] = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
_lowercase : List[Any] = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
_lowercase : Dict = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
_lowercase : Optional[Any] = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
_lowercase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Union[str, Any] = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ["""PoolFormerFeatureExtractor"""]
_lowercase : Any = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise the PyTorch model from its JSON config
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
A_ :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
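    # Example invocation (paths below are placeholders, not real files):
    #   python convert_t5_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./t5_tf_checkpoint \
    #       --config_file ./t5_config.json \
    #       --pytorch_dump_path ./t5_pytorch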
| 71
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    """Builds the minimal inputs dict used by the tests below."""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}


@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)


def _long_tensor(tok_lst):
    """Wraps a python list of token ids into a tf.int32 constant."""
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size


@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))


@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))


@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
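

# A hedged usage sketch (added for illustration, not part of the test suite): driving the
# same checkpoint the tests above use, outside of unittest. Requires TensorFlow at runtime.
def _demo_opt_generation():
    tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
    model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
    input_ids = tokenizer("Today is a beautiful day and I want", return_tensors="tf").input_ids
    generated_ids = model.generate(input_ids, max_length=10)
    return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)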
| 210
| 0
|
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sorts a list of non-negative integers in place using LSD radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
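

def _demo_radix_sort() -> None:
    # Usage sketch (added for illustration): radix_sort sorts the list in place
    # and also returns the same list object.
    data = [170, 45, 75, 90, 802, 24, 2, 66]
    assert radix_sort(data) == [2, 24, 45, 66, 75, 90, 170, 802]
    assert data == [2, 24, 45, 66, 75, 90, 170, 802]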
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361
|
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Returns the sum of the proper divisors of ``n`` (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Sums all amicable numbers below ``limit`` (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
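

def _demo_amicable_pair() -> None:
    # Illustration (not in the original module): 220 and 284 form the smallest
    # amicable pair, so both are counted by solution().
    assert sum_of_divisors(220) == 284
    assert sum_of_divisors(284) == 220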
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 217
| 0
|
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """Depth-first search that returns the size of the subtree rooted at ``start``."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # an even-sized subtree means the edge above ``start`` could be cut
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
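    # Why ``len(cuts) - 1``: dfs records every vertex whose subtree has an even number
    # of nodes, including vertex 1, the root of the whole (even-sized) tree. The root
    # has no edge above it to remove, hence the subtraction.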
| 84
|
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 219
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 324
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info: DatasetInfo, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=4_2 ),
"""v2""": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict takes precedence over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 324
| 1
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
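

def _demo_custom_timesteps():
    # Usage sketch (illustrative, mirrors the tests above): DDPMScheduler accepts an
    # explicit, strictly descending list of timesteps instead of num_inference_steps.
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
    return scheduler.timesteps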
| 230
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer

class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer

DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> "List[DPRSpanPrediction]":
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: "List[int]",
        end_logits: "List[int]",
        max_answer_length: int,
        top_spans: int,
    ) -> "List[DPRSpanPrediction]":
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals

@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
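

# Usage sketch (illustrative, not part of the module): encoding one question against two
# passages; ``decode_best_spans`` then ranks answer spans given reader model outputs.
#
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions="What is love?",
#         titles=["Haddaway", "What Is Love"],
#         texts=["Haddaway is a musician...", "'What Is Love' is a song..."],
#         padding=True,
#         return_tensors="pt",
#     )
#     # spans = tokenizer.decode_best_spans(encoded, model_outputs)  # model_outputs: DPRReaderOutput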
| 14
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 174
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Turns ``["--key", "value", ...]`` leftovers into a ``{key: value}`` dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
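    # Example invocations (shell, illustrative):
    #   datasets-cli env
    #   datasets-cli test ./path/to/dataset --save_info --all_configs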
| 174
| 1
|
def solution(n: int = 4000000) -> int:
    """Returns the sum of all even Fibonacci numbers not exceeding ``n``."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
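

def _demo_solution() -> None:
    # Illustration (added for clarity): the even Fibonacci numbers up to 100 are 2, 8 and 34.
    assert solution(10) == 10  # 2 + 8
    assert solution(100) == 44  # 2 + 8 + 34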
if __name__ == "__main__":
print(F"{solution() = }")
| 314
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))

class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
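

# Usage sketch (illustrative, mirrors the tests above): the decorator calls the wrapped
# function with ``starting_batch_size`` and halves it on every out-of-memory error.
#
#     @find_executable_batch_size(starting_batch_size=64)
#     def train(batch_size):
#         ...  # build the dataloader with ``batch_size`` and run one step
#
#     train()  # no batch_size argument here; the decorator injects it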
| 314
| 1
|
def solution(n: int = 1000) -> int:
    """Returns the index of the first Fibonacci number with ``n`` digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
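

def _demo_solution() -> None:
    # Illustration (added for clarity): the first Fibonacci number with three digits
    # is F(12) = 144.
    assert solution(3) == 12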
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 223
|
def kinetic_energy(mass: float, velocity: float) -> float:
    """Computes kinetic energy 0.5 * m * v**2; speed is taken as ``abs(velocity)``."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
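

def _demo_kinetic_energy() -> None:
    # Illustration: a 2 kg mass at 3 m/s carries 0.5 * 2 * 3**2 = 9 J; the sign of the
    # velocity does not matter because speed is taken as ``abs(velocity)``.
    assert kinetic_energy(2, 3) == 9.0
    assert kinetic_energy(2, -3) == 9.0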
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 223
| 1
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
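# Example of the patch-size derivation above: for
# model_name = "xclip-base-patch32", `find("patch")` returns 11 and the two
# characters after "patch" are "32", giving patch_size = 32.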
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
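# Illustration of the renaming above on a typical checkpoint key: the "ln_1"
# and "transformer.resblocks" rules both fire in sequence, so
#     rename_key("transformer.resblocks.0.ln_1.weight")
# returns "text_model.encoder.layers.0.layer_norm1.weight".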
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
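# Layout assumed by the slicing above: CLIP-style checkpoints store the
# attention projections as a single fused `attn.in_proj` tensor of shape
# (3 * dim, dim) (or (3 * dim,) for the bias), with the query, key and value
# blocks stacked row-wise. Hence q = val[:dim], k = val[dim : dim * 2] and
# v = val[-dim:], each written back under its own `q_proj`/`k_proj`/`v_proj` key.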
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
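# Example invocation (the script filename is illustrative):
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 \
#       --push_to_hub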
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
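# Both functions enumerate all n! orderings, just in different orders, so a
# simple cross-check against the standard library is:
#
#     import itertools
#     assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
#     assert sorted(permute2([1, 2, 3])) == sorted(map(list, itertools.permutations([1, 2, 3])))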
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
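# Note on the dynamic test above: pushing a feature extractor registered via
# `register_for_auto_class()` writes an `auto_map` entry into the repo's
# preprocessor_config.json, which is why loading it back through
# AutoFeatureExtractor requires `trust_remote_code=True`.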
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase_ : Optional[Any] = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc(model_doc):
    """Cleans the model documentation table of content by removing duplicate entries and sorting by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    """Checks that the model doc part of the table of content is clean, fixing it if `overwrite` is passed."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
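# Example of what clean_model_doc_toc() does: duplicated `local` entries are
# collapsed to one and the result is sorted by title, so
#     clean_model_doc_toc([
#         {"local": "model_doc/bert", "title": "BERT"},
#         {"local": "model_doc/albert", "title": "ALBERT"},
#         {"local": "model_doc/bert", "title": "BERT"},
#     ])
# returns [{"local": "model_doc/albert", "title": "ALBERT"},
#          {"local": "model_doc/bert", "title": "BERT"}].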
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(R"| /\ | |- | |- |--| |\ /| |-")
print(R"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
while K:
        user_number = int(input("enter the number and , and see the magic : "))
print()
pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
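# For pretty_print(3) the upper half produced by floyd() looks like:
#       *
#      * *
#     * * *
# and reverse_floyd() then mirrors it to complete the diamond.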
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
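# The _LazyModule replacement above defers the heavy torch imports until an
# attribute (e.g. PegasusXModel) is first accessed, keeping
# `import transformers` cheap when this submodule is never used.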
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
def __lowerCamelCase ( self ):
lowercase : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Dict = self.dummy_cond_unet
lowercase : Optional[int] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
lowercase : Optional[Any] = self.dummy_vae
lowercase : Dict = self.dummy_text_encoder
lowercase : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
lowercase : Dict = StableDiffusionPipeline(
unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=self.dummy_extractor , )
lowercase : Any = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowercase : Tuple = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
lowercase : Optional[Any] = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
lowercase : str = output.images
lowercase : str = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
lowercase : Tuple = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
lowercase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : Any = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
lowercase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[Any] = self.dummy_cond_unet
lowercase : Dict = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = self.dummy_vae
lowercase : Any = self.dummy_text_encoder
lowercase : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
lowercase : List[Any] = StableDiffusionPipeline(
unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=self.dummy_extractor , )
lowercase : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = '''A painting of a squirrel eating a burger'''
lowercase : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
lowercase : List[Any] = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
lowercase : Any = output.images
lowercase : Tuple = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
lowercase : Dict = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
lowercase : List[Any] = image[0, -3:, -3:, -1]
lowercase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
lowercase : List[str] = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(pipe.scheduler , SCREAMING_SNAKE_CASE__ )
assert pipe.safety_checker is None
lowercase : List[Any] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase : Tuple = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __lowerCamelCase ( self ):
lowercase : Any = self.dummy_cond_unet
lowercase : Union[str, Any] = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = self.dummy_vae
lowercase : str = self.dummy_text_encoder
lowercase : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
lowercase : List[Any] = unet.half()
lowercase : Optional[Any] = vae.half()
lowercase : Any = bert.half()
# make sure here that pndm scheduler skips prk
lowercase : Optional[Any] = StableDiffusionPipeline(
unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=self.dummy_extractor , )
lowercase : str = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = '''A painting of a squirrel eating a burger'''
lowercase : str = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
lowercase : Tuple = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
lowercase : Tuple = 4003660346
lowercase : str = 7
# without safety guidance (sld_guidance_scale = 0)
lowercase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase : Dict = output.images
lowercase : Tuple = image[0, -3:, -3:, -1]
lowercase : Tuple = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
lowercase : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
lowercase : int = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase : Optional[int] = output.images
lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
lowercase : Optional[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
lowercase : List[str] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase : Any = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = '''padme amidala taking a bath artwork, safe for work, no nudity'''
lowercase : int = 2734971755
lowercase : Union[str, Any] = 7
lowercase : Optional[int] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
lowercase : str = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase : Tuple = output.images
lowercase : str = image[0, -3:, -3:, -1]
lowercase : Dict = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowercase : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase : Dict = output.images
lowercase : Optional[Any] = image[0, -3:, -3:, -1]
lowercase : int = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
lowercase : List[str] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
lowercase : List[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
lowercase : Tuple = 1044355234
lowercase : Any = 12
lowercase : Any = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase : int = output.images
lowercase : Tuple = image[0, -3:, -3:, -1]
lowercase : Any = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowercase : str = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = sd_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase : List[Any] = output.images
lowercase : int = image[0, -3:, -3:, -1]
lowercase : Optional[int] = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
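# A note on the technique used above: LocalSGD lets each worker take
# `local_sgd_steps` optimizer steps on its own copy of the parameters before
# the parameters are averaged across processes, which trades a little
# staleness for far less communication than an all-reduce on every step.
# Gradient accumulation (the `accelerator.accumulate(model)` context) is
# orthogonal and can be combined with it, as this script does.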
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
"""simple docstring"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
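# Worked example: "XIIII" (5 characters) parses to 14, and the minimal form is
# generate_roman_numerals(14) == "XIV" (3 characters), a saving of 2:
#     parse_roman_numerals("XIIII")  # 14
#     generate_roman_numerals(14)    # "XIV"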
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
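# Added illustration — a minimal, self-contained sketch of the deferred-import idea
# behind `_LazyModule` above. The `LazyModule` class and the `json` mapping here are
# hypothetical stand-ins for demonstration, not the transformers implementation:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only on first attribute access.
        for module_name, exported in self._import_structure.items():
            if attr in exported:
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")

# `loads` is resolved (and `json` imported) only when first accessed.
demo = LazyModule("demo", {"json": ["loads"]})
assert demo.loads('{"a": 1}') == {"a": 1}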
| 360
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 96
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
UpperCAmelCase_ : Dict = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'} )
    validation_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    pad_to_max_length: bool = field(
        default=False, metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        }, )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__(self, features):
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['input_ids'])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt', )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels, dtype=torch.int64)
        return batch
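# Added shape note (illustrative numbers, not from the original script): with
# batch_size=2 and 4 answer endings, `flattened_features` holds 8 example dicts;
# `tokenizer.pad` then returns (8, seq_len) tensors, and the `.view` call above
# restores (2, 4, seq_len) so the multiple-choice head scores each ending per example.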
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag', model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}""")
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            'swag', 'regular', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [F"""ending{i}""" for i in range(4)]
    context_name = 'sent1'
    question_header_name = 'sent2'
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.')
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""")
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding='max_length' if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
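    # Added note (illustrative, not from the original script): for one SWAG example,
    # `first_sentences` repeats the context 4 times and `second_sentences` pairs it
    # with each candidate ending, so after tokenization every feature key holds a
    # list of 4 sequences per example — the layout DataCollatorForMultipleChoice expects.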
    if training_args.do_train:
        if 'train' not in raw_datasets:
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
    if training_args.do_eval:
        if 'validation' not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {'accuracy': (preds == label_ids).astype(np.float32).mean().item()}
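    # Added worked example (hypothetical values): with `predictions` of shape (N, 4),
    # argmax picks one of the 4 endings per example; if 3 of 4 picks match
    # `label_ids`, the reported accuracy is 0.75.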
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 32
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_blip'''] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blip'''] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blip'''] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 149
| 0
|
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
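# Added note: the helper above returns the encoder `attention_mask` under the
# "decoder_attention_mask" key as well — kept as-is here, since the tests below
# appear to rely on that exact dict layout.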
class MaMaaaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act='relu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict['input_ids']
        attention_mask = inputs_dict['attention_mask']
        head_mask = inputs_dict['head_mask']
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)['last_hidden_state']
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            'last_hidden_state'
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
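    # Added note: the check above compares a full forward pass on the extended
    # sequence against incremental decoding with cached past_key_values; matching
    # slices (atol=1e-2) mean the cache path is numerically consistent.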
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)
        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': MaMaaaForConditionalGeneration,
            'feature-extraction': MaMaaaModel,
            'summarization': MaMaaaForConditionalGeneration,
            'text2text-generation': MaMaaaForConditionalGeneration,
            'translation': MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            if not self.is_encoder_decoder:
                input_ids = inputs['input_ids']
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs['input_ids']
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)
            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")
    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1_024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device)
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device)
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
            'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")
        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device), attention_mask=dct["attention_mask"].to(torch_device), num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"), )
        expected_en = [
            'The NSA case highlights the total absence of intelligence debate',
            'I think there are two levels of response from the French government.',
            'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
            ' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
            ' communications in France.',
        ]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True)
        assert generated == expected_en
| 350
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            F'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
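    # Added sanity check (a minimal sketch, not part of the original module):
    # when the decomposition exists, L @ U should reconstruct the input matrix.
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)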
| 305
| 0
|