| code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
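# A minimal usage sketch for one of the collators above, assuming only the
# public transformers API (the "bert-base-uncased" checkpoint is illustrative):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="pt")
    features = [tokenizer("short"), tokenizer("a slightly longer sentence")]
    batch = collator(features)  # pads every tensor to the longest item in the batch
    print(batch["input_ids"].shape)  # (2, longest sequence length in the batch)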
| 89 |
'''simple docstring'''
def triangle_number_generator():
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
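    # Sanity checks for the divisor-count identity the helper relies on:
    # if n = p1**e1 * p2**e2 * ..., then d(n) = (e1 + 1) * (e2 + 1) * ...
    assert count_divisors(28) == 6  # 28 = 2**2 * 7 -> (2 + 1) * (1 + 1) = 6
    assert count_divisors(76576500) == 576  # 76576500 is the published Project Euler 12 answer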
| 89 | 1 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : Optional[int] ):
_a : Tuple = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_UpperCAmelCase ) )
def __lowercase ( self : str ):
_a : List[str] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_UpperCAmelCase ) )
def __lowercase ( self : Dict ):
_a : Any = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_UpperCAmelCase ) )
def __lowercase ( self : Union[str, Any] ):
_a : Tuple = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_UpperCAmelCase ) )
def __lowercase ( self : Optional[Any] ):
_a : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_UpperCAmelCase ) )
def __lowercase ( self : List[str] ):
_a : Optional[int] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_UpperCAmelCase ,variant=_UpperCAmelCase ) )
def __lowercase ( self : Union[str, Any] ):
_a : Dict = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a : Any = 'fp16'
self.assertTrue(is_safetensors_compatible(_UpperCAmelCase ,variant=_UpperCAmelCase ) )
def __lowercase ( self : List[str] ):
# pass variant but use the non-variant filenames
_a : Dict = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
_a : Optional[int] = 'fp16'
self.assertTrue(is_safetensors_compatible(_UpperCAmelCase ,variant=_UpperCAmelCase ) )
def __lowercase ( self : str ):
_a : Any = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a : int = 'fp16'
self.assertFalse(is_safetensors_compatible(_UpperCAmelCase ,variant=_UpperCAmelCase ) )
def __lowercase ( self : Dict ):
_a : List[Any] = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
_a : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_UpperCAmelCase ,variant=_UpperCAmelCase ) )
def __lowercase ( self : List[str] ):
# pass variant but use the non-variant filenames
_a : Any = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
_a : Optional[Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(_UpperCAmelCase ,variant=_UpperCAmelCase ) )
def __lowercase ( self : Union[str, Any] ):
_a : Any = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_UpperCAmelCase ,variant=_UpperCAmelCase ) )
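# An illustrative re-implementation of the rule the tests above appear to
# encode; this is an assumption about the behavior, not diffusers' actual
# source: every torch ".bin" weight needs a ".safetensors" counterpart,
# transformers-style files rename "pytorch_model" -> "model", and a variant
# query is also satisfied by the non-variant filename.
def _is_safetensors_compatible_sketch(filenames, variant=None):
    import os

    available = set(filenames)
    for filename in filenames:
        if not filename.endswith(".bin"):
            continue
        folder, name = os.path.split(filename)
        stem = name[: -len(".bin")]
        if stem.startswith("pytorch_"):  # transformers naming: pytorch_model -> model
            stem = stem[len("pytorch_") :]
        candidates = {os.path.join(folder, stem + ".safetensors")}
        if variant:  # a non-variant safetensors file also counts as compatible
            candidates.add(os.path.join(folder, stem.replace(f".{variant}", "") + ".safetensors"))
        if not candidates & available:
            return False
    return True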
| 89 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : Optional[int] ,_UpperCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,**_UpperCAmelCase : Dict ,):
super().__init__(features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Tuple = Sql(
cache_dir=_UpperCAmelCase ,features=_UpperCAmelCase ,sql=_UpperCAmelCase ,con=_UpperCAmelCase ,**_UpperCAmelCase ,)
def __lowercase ( self : Dict ):
_a : Optional[Any] = None
_a : Dict = None
_a : Dict = None
_a : Optional[int] = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase ,download_mode=_UpperCAmelCase ,verification_mode=_UpperCAmelCase ,base_path=_UpperCAmelCase ,)
# Build dataset for splits
_a : List[str] = self.builder.as_dataset(
split='train' ,verification_mode=_UpperCAmelCase ,in_memory=self.keep_in_memory )
return dataset
class __magic_name__ :
def __init__( self : Optional[int] ,_UpperCAmelCase : Dataset ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Dict ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_a : Dict = dataset
_a : List[Any] = name
_a : Tuple = con
_a : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_a : List[Any] = num_proc
_a : Tuple = to_sql_kwargs
def __lowercase ( self : List[Any] ):
_a : Tuple = self.to_sql_kwargs.pop('sql' ,_UpperCAmelCase )
_a : str = self.to_sql_kwargs.pop('con' ,_UpperCAmelCase )
_a : Optional[Any] = self.to_sql_kwargs.pop('index' ,_UpperCAmelCase )
_a : Any = self._write(index=_UpperCAmelCase ,**self.to_sql_kwargs )
return written
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Dict ):
_a , _a , _a : Any = args
_a : Tuple = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
_a : Dict = query_table(
table=self.dataset.data ,key=slice(_UpperCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
_a : Tuple = batch.to_pandas()
_a : Dict = df.to_sql(self.name ,self.con ,index=_UpperCAmelCase ,**_UpperCAmelCase )
return num_rows or len(_UpperCAmelCase )
def __lowercase ( self : int ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : List[Any] ):
_a : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_a , _a : List[Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,_UpperCAmelCase ,_UpperCAmelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += num_rows
return written
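if __name__ == "__main__":
    # A hedged usage sketch of the reader/writer pair above through the public
    # datasets API (the table name and in-memory connection are illustrative):
    import sqlite3

    ds = Dataset.from_dict({"id": [1, 2, 3], "text": ["a", "b", "c"]})
    con = sqlite3.connect(":memory:")
    ds.to_sql("my_table", con, batch_size=2)  # writer path: chunked df.to_sql appends
    print(Dataset.from_sql("SELECT id, text FROM my_table", con)[:])  # reader path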
| 89 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self : Tuple ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Tuple=7 ,_UpperCAmelCase : Optional[int]=3 ,_UpperCAmelCase : List[Any]=18 ,_UpperCAmelCase : Dict=30 ,_UpperCAmelCase : Dict=400 ,_UpperCAmelCase : Any=True ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : Tuple=True ,):
_a : str = size if size is not None else {'height': 18, 'width': 18}
_a : List[str] = parent
_a : Any = batch_size
_a : int = num_channels
_a : Optional[Any] = image_size
_a : str = min_resolution
_a : List[str] = max_resolution
_a : Dict = do_resize
_a : Union[str, Any] = size
_a : Optional[int] = apply_ocr
def __lowercase ( self : Dict ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __lowercase ( self : Tuple ):
_a : List[str] = LayoutLMvaImageProcessingTester(self )
@property
def __lowercase ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : Union[str, Any] ):
_a : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase ,'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase ,'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase ,'apply_ocr' ) )
def __lowercase ( self : Union[str, Any] ):
_a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
_a : Any = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def __lowercase ( self : Any ):
pass
def __lowercase ( self : str ):
# Initialize image_processing
_a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase ,Image.Image )
# Test not batched input
_a : Optional[Any] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,_UpperCAmelCase )
self.assertIsInstance(encoding.boxes ,_UpperCAmelCase )
# Test batched
_a : Union[str, Any] = image_processing(_UpperCAmelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def __lowercase ( self : Dict ):
# Initialize image_processing
_a : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCAmelCase ,numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase ,np.ndarray )
# Test not batched input
_a : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
_a : Optional[Any] = image_processing(_UpperCAmelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def __lowercase ( self : List[Any] ):
# Initialize image_processing
_a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCAmelCase ,torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase ,torch.Tensor )
# Test not batched input
_a : Optional[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
_a : Dict = image_processing(_UpperCAmelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def __lowercase ( self : int ):
# with apply_OCR = True
_a : Any = LayoutLMvaImageProcessor()
from datasets import load_dataset
_a : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
_a : int = Image.open(ds[0]['file'] ).convert('RGB' )
_a : Dict = image_processing(_UpperCAmelCase ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_a : Dict = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_a : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,_UpperCAmelCase )
self.assertListEqual(encoding.boxes ,_UpperCAmelCase )
# with apply_OCR = False
_a : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase )
_a : Tuple = image_processing(_UpperCAmelCase ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
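# A minimal usage sketch of the processor exercised above ("va" in these
# identifiers appears to stand in for "v3", i.e. transformers'
# LayoutLMv3ImageProcessor); apply_ocr=False skips the Tesseract pass:
#
#     processor = LayoutLMvaImageProcessor(apply_ocr=False)
#     encoding = processor(Image.new("RGB", (640, 480)), return_tensors="pt")
#     encoding.pixel_values.shape  # torch.Size([1, 3, 224, 224])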
| 89 |
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(
    img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image into gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
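    # What getAffineTransform computes, checked by hand: the 2x3 matrix M
    # with dst_i == M @ [x_i, y_i, 1] for the three point pairs.
    A = np.hstack([pts1, np.ones((3, 1), np.float32)])  # rows of [x, y, 1]
    M = np.linalg.solve(A, pts2).T  # 2x3 affine matrix, same as getAffineTransform(pts1, pts2)
    assert np.allclose(M @ np.array([50, 50, 1]), [10, 100], atol=1e-3)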
| 89 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : str ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Optional[int]=2 ,_UpperCAmelCase : Dict=True ,_UpperCAmelCase : Any=False ,_UpperCAmelCase : Optional[Any]=10 ,_UpperCAmelCase : str=3 ,_UpperCAmelCase : Union[str, Any]=32 * 4 ,_UpperCAmelCase : int=32 * 6 ,_UpperCAmelCase : List[Any]=4 ,_UpperCAmelCase : Tuple=32 ,):
_a : Tuple = parent
_a : List[str] = batch_size
_a : Optional[int] = is_training
_a : Optional[int] = use_auxiliary_loss
_a : int = num_queries
_a : Any = num_channels
_a : List[Any] = min_size
_a : str = max_size
_a : Any = num_labels
_a : Optional[int] = mask_feature_size
def __lowercase ( self : List[str] ):
_a : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_UpperCAmelCase )
_a : str = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=_UpperCAmelCase )
_a : Any = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=_UpperCAmelCase ) > 0.5
).float()
_a : Any = (torch.rand((self.batch_size, self.num_labels) ,device=_UpperCAmelCase ) > 0.5).long()
_a : str = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowercase ( self : List[str] ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig(
decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,)
def __lowercase ( self : Optional[int] ):
_a , _a , _a , _a , _a : Union[str, Any] = self.prepare_config_and_inputs()
_a : str = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __lowercase ( self : str ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[int] ):
_a : Dict = output.encoder_hidden_states
_a : Tuple = output.pixel_decoder_hidden_states
_a : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_UpperCAmelCase ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) ,config.decoder_config.decoder_layers )
def __lowercase ( self : str ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Any=False ):
with torch.no_grad():
_a : List[str] = MaskFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : int = model(pixel_values=_UpperCAmelCase ,pixel_mask=_UpperCAmelCase )
_a : int = model(_UpperCAmelCase ,output_hidden_states=_UpperCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : str ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Dict ):
_a : List[str] = MaskFormerForInstanceSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
def comm_check_on_output(_UpperCAmelCase : List[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a : Dict = model(pixel_values=_UpperCAmelCase ,pixel_mask=_UpperCAmelCase )
_a : List[Any] = model(_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
_a : Optional[int] = model(
pixel_values=_UpperCAmelCase ,pixel_mask=_UpperCAmelCase ,mask_labels=_UpperCAmelCase ,class_labels=_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : List[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowerCAmelCase : Any = (
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[str] = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Tuple = False
def __lowercase ( self : int ):
_a : Any = MaskFormerModelTester(self )
_a : List[Any] = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase )
def __lowercase ( self : Optional[int] ):
self.config_tester.run_common_tests()
def __lowercase ( self : Union[str, Any] ):
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCAmelCase ,**_UpperCAmelCase ,output_hidden_states=_UpperCAmelCase )
def __lowercase ( self : str ):
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_UpperCAmelCase )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def __lowercase ( self : Dict ):
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def __lowercase ( self : List[str] ):
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def __lowercase ( self : Any ):
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def __lowercase ( self : List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowercase ( self : Dict ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : Optional[Any] ):
pass
def __lowercase ( self : Optional[Any] ):
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[Any] = model_class(_UpperCAmelCase )
_a : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : str = [*signature.parameters.keys()]
_a : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
@slow
def __lowercase ( self : Optional[Any] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
_a : List[Any] = MaskFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : Optional[int] = (self.model_tester.min_size,) * 2
_a : Optional[Any] = {
'pixel_values': torch.randn((2, 3, *size) ,device=_UpperCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) ,device=_UpperCAmelCase ),
'class_labels': torch.zeros(2 ,10 ,device=_UpperCAmelCase ).long(),
}
_a : str = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_UpperCAmelCase )
_a : Any = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def __lowercase ( self : Tuple ):
_a , _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCAmelCase ,**_UpperCAmelCase ,output_hidden_states=_UpperCAmelCase )
def __lowercase ( self : str ):
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Union[str, Any] = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
_a : Any = model(**_UpperCAmelCase ,output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def __lowercase ( self : Union[str, Any] ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_a : List[Any] = self.all_model_classes[1]
_a , _a , _a , _a , _a : Tuple = self.model_tester.prepare_config_and_inputs()
_a : Optional[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_a : Optional[Any] = model(_UpperCAmelCase ,mask_labels=_UpperCAmelCase ,class_labels=_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : str ):
# only MaskFormerForInstanceSegmentation has the loss
_a : Tuple = self.all_model_classes[1]
_a , _a , _a , _a , _a : Any = self.model_tester.prepare_config_and_inputs()
_a : Any = True
_a : str = True
_a : Optional[int] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_a : List[str] = model(_UpperCAmelCase ,mask_labels=_UpperCAmelCase ,class_labels=_UpperCAmelCase )
_a : int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a : Optional[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_a : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCAmelCase = 1e-4
def __lowerCamelCase ( ) -> int:
_a : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Tuple ):
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def __lowercase ( self : List[Any] ):
_a : Any = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(_UpperCAmelCase )
_a : Optional[int] = self.default_image_processor
_a : List[str] = prepare_img()
_a : Optional[int] = image_processor(_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
_a : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase ,(1, 3, 800, 1088) )
with torch.no_grad():
_a : List[str] = model(**_UpperCAmelCase )
_a : str = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_UpperCAmelCase ,atol=_UpperCAmelCase ) )
_a : Optional[Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_UpperCAmelCase ,atol=_UpperCAmelCase ) )
_a : Optional[Any] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_UpperCAmelCase ,atol=_UpperCAmelCase ) )
def __lowercase ( self : List[Any] ):
_a : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(_UpperCAmelCase )
.eval()
)
_a : Union[str, Any] = self.default_image_processor
_a : Tuple = prepare_img()
_a : str = image_processor(_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
_a : Any = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase ,(1, 3, 800, 1088) )
with torch.no_grad():
_a : Any = model(**_UpperCAmelCase )
# masks_queries_logits
_a : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
_a : Union[str, Any] = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
_a : int = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_UpperCAmelCase ,atol=_UpperCAmelCase ) )
# class_queries_logits
_a : str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a : List[str] = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_UpperCAmelCase ,atol=_UpperCAmelCase ) )
def __lowercase ( self : Tuple ):
_a : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(_UpperCAmelCase )
.eval()
)
_a : Union[str, Any] = self.default_image_processor
_a : Dict = prepare_img()
_a : Any = image_processor(_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
_a : int = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase ,(1, 3, 800, 1088) )
with torch.no_grad():
_a : Dict = model(**_UpperCAmelCase )
# masks_queries_logits
_a : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
_a : str = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
_a : Optional[Any] = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_UpperCAmelCase ,atol=_UpperCAmelCase ) )
# class_queries_logits
_a : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a : Optional[int] = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_UpperCAmelCase ,atol=_UpperCAmelCase ) )
def __lowercase ( self : Optional[int] ):
_a : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(_UpperCAmelCase )
.eval()
)
_a : Optional[Any] = self.default_image_processor
_a : List[Any] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
_a : str = inputs['pixel_values'].to(_UpperCAmelCase )
_a : int = [el.to(_UpperCAmelCase ) for el in inputs['mask_labels']]
_a : Optional[Any] = [el.to(_UpperCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
_a : List[Any] = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
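# A hedged usage sketch of the classes exercised above (the checkpoint name is
# taken from these tests; the post-processing call assumes the public API):
#
#     processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
#     model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     # class logits: (1, num_queries, num_labels + 1); mask logits at 1/4 input resolution
#     semantic_map = processor.post_process_semantic_segmentation(outputs)[0]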
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
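# The pattern above is the transformers lazy-import idiom: type checkers see
# the real imports under TYPE_CHECKING, while at runtime attribute access
# triggers the submodule import on demand. A minimal sketch of the idea (an
# approximation, not the actual _LazyModule implementation):
#
#     import importlib, types
#
#     class LazySketch(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._map = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
#
#         def __getattr__(self, attr):
#             if attr not in self._map:
#                 raise AttributeError(attr)
#             module = importlib.import_module("." + self._map[attr], self.__name__)
#             return getattr(module, attr)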
| 89 | 1 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=1 ) -> Dict:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Tuple:
_a : Any = []
for old_item in old_list:
_a : Union[str, Any] = old_item.replace('in_layers.0' , 'norm1' )
_a : Optional[int] = new_item.replace('in_layers.2' , 'conv1' )
_a : str = new_item.replace('out_layers.0' , 'norm2' )
_a : List[str] = new_item.replace('out_layers.3' , 'conv2' )
_a : str = new_item.replace('emb_layers.1' , 'time_emb_proj' )
_a : Tuple = new_item.replace('skip_connection' , 'conv_shortcut' )
_a : Any = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Any:
_a : List[str] = []
for old_item in old_list:
_a : List[Any] = old_item
_a : Optional[int] = new_item.replace('norm.weight' , 'group_norm.weight' )
_a : Optional[Any] = new_item.replace('norm.bias' , 'group_norm.bias' )
_a : Any = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
_a : Optional[Any] = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
_a : Optional[int] = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Any:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_a : Optional[Any] = old_checkpoint[path]
_a : Optional[Any] = old_tensor.shape[0] // 3
_a : Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_a : int = old_tensor.shape[0] // config['num_head_channels'] // 3
_a : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_a , _a , _a : Tuple = old_tensor.split(channels // num_heads , dim=1 )
_a : Dict = query.reshape(lowerCAmelCase_ )
_a : str = key.reshape(lowerCAmelCase_ )
_a : Optional[int] = value.reshape(lowerCAmelCase_ )
for path in paths:
_a : Dict = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_a : Any = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
_a : str = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
_a : Union[str, Any] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
_a : int = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_a : List[str] = old_checkpoint[path['old']][:, :, 0]
else:
_a : Dict = old_checkpoint[path['old']]
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_a : Optional[int] = {}
_a : Dict = checkpoint['time_embed.0.weight']
_a : Tuple = checkpoint['time_embed.0.bias']
_a : Union[str, Any] = checkpoint['time_embed.2.weight']
_a : List[str] = checkpoint['time_embed.2.bias']
_a : List[str] = checkpoint['input_blocks.0.0.weight']
_a : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_a : Optional[int] = checkpoint['out.0.weight']
_a : int = checkpoint['out.0.bias']
_a : List[str] = checkpoint['out.2.weight']
_a : Optional[int] = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
_a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
_a : Dict = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
# Retrieves the keys for the middle blocks only
_a : List[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
_a : Union[str, Any] = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
# Retrieves the keys for the output blocks only
_a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
_a : str = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
for i in range(1 , lowerCAmelCase_ ):
_a : List[Any] = (i - 1) // (config['num_res_blocks'] + 1)
_a : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1)
_a : Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
_a : List[Any] = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
_a : Union[str, Any] = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
_a : List[str] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
_a : Optional[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path, resnet_op] , config=lowerCAmelCase_ )
if len(lowerCAmelCase_ ):
_a : List[str] = renew_attention_paths(lowerCAmelCase_ )
_a : List[Any] = {
'old': f"""input_blocks.{i}.1""",
'new': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : Optional[Any] = {
f"""input_blocks.{i}.1.qkv.bias""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ , )
_a : str = middle_blocks[0]
_a : Tuple = middle_blocks[1]
_a : Any = middle_blocks[2]
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : int = renew_attention_paths(lowerCAmelCase_ )
_a : int = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ )
for i in range(lowerCAmelCase_ ):
_a : List[str] = i // (config['num_res_blocks'] + 1)
_a : Any = i % (config['num_res_blocks'] + 1)
_a : Union[str, Any] = [shave_segments(lowerCAmelCase_ , 2 ) for name in output_blocks[i]]
_a : Optional[Any] = {}
for layer in output_block_layers:
_a , _a : str = layer.split('.' )[0], shave_segments(lowerCAmelCase_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(lowerCAmelCase_ )
else:
_a : str = [layer_name]
if len(lowerCAmelCase_ ) > 1:
_a : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
_a : Dict = renew_resnet_paths(lowerCAmelCase_ )
_a : str = renew_resnet_paths(lowerCAmelCase_ )
_a : Optional[int] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , config=lowerCAmelCase_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_a : List[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
_a : Tuple = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
_a : List[str] = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(lowerCAmelCase_ ) == 2:
_a : Union[str, Any] = []
if len(lowerCAmelCase_ ):
_a : Tuple = renew_attention_paths(lowerCAmelCase_ )
_a : str = {
'old': f"""output_blocks.{i}.1""",
'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : List[Any] = {
f"""output_blocks.{i}.1.qkv.bias""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=lowerCAmelCase_ , )
else:
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_a : int = '.'.join(['output_blocks', str(lowerCAmelCase_ ), path['old']] )
_a : Union[str, Any] = '.'.join(['up_blocks', str(lowerCAmelCase_ ), 'resnets', str(lowerCAmelCase_ ), path['new']] )
_a : Union[str, Any] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__lowerCAmelCase = json.loads(f.read())
__lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__lowerCAmelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
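# The qkv handling in assign_to_checkpoint, in miniature: a fused attention
# projection of shape (3*C, ...) is reshaped per-head and split into equal
# query/key/value chunks (the shapes below are illustrative):
#
#     fused = torch.randn(3 * 32, 32)               # fused qkv weight, C = 32
#     per_head = fused.reshape(4, 3 * 32 // 4, 32)  # 4 heads of 3 * 8 rows each
#     q, k, v = per_head.split(32 // 4, dim=1)      # (4, 8, 32) apiece
#     query = q.reshape(-1, 32)                     # back to (32, 32)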
| 89 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1024 , lowerCAmelCase_=1024 , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> List[Any]:
_a : str = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
_a : List[Any] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='train' , **lowerCAmelCase_ )
_a : List[str] = tok.pad_token_id
def get_lens(lowerCAmelCase_ ):
_a : Dict = tqdm(
DataLoader(lowerCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=lowerCAmelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_a : Union[str, Any] = []
for batch in dl:
_a : Optional[Any] = batch['input_ids'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
_a : Optional[Any] = batch['labels'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
max_lens.append(max(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
max_lens.extend(lowerCAmelCase_ )
return max_lens
_a : str = get_lens(lowerCAmelCase_ )
_a : Optional[int] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='val' , **lowerCAmelCase_ )
_a : Dict = get_lens(lowerCAmelCase_ )
pickle_save(lowerCAmelCase_ , train_ds.len_file )
pickle_save(lowerCAmelCase_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
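# The core length computation above, in miniature: count non-pad tokens per
# row of a padded batch (values are illustrative):
#
#     import torch
#     input_ids = torch.tensor([[5, 6, 7, 0, 0], [5, 6, 0, 0, 0]])
#     input_ids.ne(0).sum(1).tolist()  # [3, 2] with pad_token_id == 0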
| 89 | 1 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, value in enumerate(self):
            if i == index:
                return value
        return None

    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any):
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def __lowerCamelCase ( ) -> None:
_a : List[str] = LinkedList()
assert linked_list.is_empty() is True
assert str(lowerCAmelCase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(lowerCAmelCase_ ) == i
linked_list.insert_nth(lowerCAmelCase_ , i + 1 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(lowerCAmelCase_ ) == 9
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
_a : Union[str, Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(-8 , 1 ) )
def __lowerCamelCase ( ) -> None:
_a : Dict = [
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
-192.55_555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
_a : List[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(lowerCAmelCase_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(lowerCAmelCase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
_a : List[str] = linked_list.delete_head()
assert result == -9
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
_a : Dict = linked_list.delete_tail()
assert result == 12.2
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
_a : Optional[Any] = linked_list.delete_nth(10 )
assert result is None
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(lowerCAmelCase_ )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(lowerCAmelCase_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __lowerCamelCase ( ) -> Union[str, Any]:
from doctest import testmod
testmod()
_a : Optional[int] = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(lowerCAmelCase_ )
print('\nReading/changing Node data using indexing:' )
print(f"""Element at Position 1: {linked_list[1]}""" )
_a : Optional[Any] = input('Enter New Value: ' ).strip()
print('New list:' )
print(lowerCAmelCase_ )
print(f"""length of linked_list is : {len(lowerCAmelCase_ )}""" )
if __name__ == "__main__":
main()
| 89 |
'''simple docstring'''
from typing import Any
class __magic_name__ :
def __init__( self : List[Any] ,_UpperCAmelCase : Any ):
_a : List[Any] = data
_a : Union[str, Any] = None
def __repr__( self : Any ):
return F"""Node({self.data})"""
class __magic_name__ :
def __init__( self : int ):
_a : Tuple = None
def __iter__( self : str ):
_a : int = self.head
while node:
yield node.data
_a : Union[str, Any] = node.next
def __len__( self : Optional[Any] ):
return sum(1 for _ in self )
def __repr__( self : str ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __getitem__( self : Tuple ,_UpperCAmelCase : int ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Union[str, Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
_a : Any = self.head
for _ in range(_UpperCAmelCase ):
_a : Optional[Any] = current.next
_a : Optional[int] = data
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Any ):
self.insert_nth(len(self ) ,_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : Any ):
self.insert_nth(0 ,_UpperCAmelCase )
def __lowercase ( self : str ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
_a : int = Node(_UpperCAmelCase )
if self.head is None:
_a : str = new_node
elif index == 0:
_a : List[str] = self.head # link new_node to head
_a : Union[str, Any] = new_node
else:
_a : int = self.head
for _ in range(index - 1 ):
_a : Union[str, Any] = temp.next
_a : List[str] = temp.next
_a : Optional[int] = new_node
def __lowercase ( self : Optional[int] ): # print every node data
print(self )
def __lowercase ( self : str ):
return self.delete_nth(0 )
def __lowercase ( self : str ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def __lowercase ( self : List[str] ,_UpperCAmelCase : int = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
_a : Optional[Any] = self.head # default first node
if index == 0:
_a : int = self.head.next
else:
_a : int = self.head
for _ in range(index - 1 ):
_a : str = temp.next
_a : str = temp.next
_a : int = temp.next.next
return delete_node.data
def __lowercase ( self : List[Any] ):
return self.head is None
def __lowercase ( self : Tuple ):
_a : List[Any] = None
_a : Tuple = self.head
while current:
# Store the current node's next node.
_a : Dict = current.next
# Make the current node's next point backwards
_a : str = prev
# Make the previous node be the current node
_a : Tuple = current
# Make the current node the next node (to progress iteration)
_a : Optional[Any] = next_node
# Return prev in order to put the head at the end
_a : int = prev
def __lowerCamelCase ( ) -> None:
_a : List[str] = LinkedList()
assert linked_list.is_empty() is True
assert str(lowerCAmelCase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(lowerCAmelCase_ ) == i
linked_list.insert_nth(lowerCAmelCase_ , i + 1 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(lowerCAmelCase_ ) == 9
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
_a : Union[str, Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(-8 , 1 ) )
def __lowerCamelCase ( ) -> None:
_a : Dict = [
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
-192.55_555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
_a : List[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(lowerCAmelCase_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(lowerCAmelCase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
_a : List[str] = linked_list.delete_head()
assert result == -9
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
_a : Dict = linked_list.delete_tail()
assert result == 12.2
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
_a : Optional[Any] = linked_list.delete_nth(10 )
assert result is None
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(lowerCAmelCase_ )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(lowerCAmelCase_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __lowerCamelCase ( ) -> Union[str, Any]:
from doctest import testmod
testmod()
_a : Optional[int] = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(lowerCAmelCase_ )
print('\nReading/changing Node data using indexing:' )
print(f"""Element at Position 1: {linked_list[1]}""" )
_a : Optional[Any] = input('Enter New Value: ' ).strip()
print('New list:' )
print(lowerCAmelCase_ )
print(f"""length of linked_list is : {len(lowerCAmelCase_ )}""" )
if __name__ == "__main__":
main()
| 89 | 1 |
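Both linked-list copies above reverse the list with the classic three-pointer walk. A standalone sketch of the same technique on a minimal node type, with the pointer updates commented:

class _Node:
    def __init__(self, data, next_node=None):
        self.data = data
        self.next = next_node

def reverse(head):
    """Reverse a singly linked list iteratively; return the new head."""
    prev = None
    current = head
    while current:
        next_node = current.next  # remember the rest of the list
        current.next = prev       # point the current node backwards
        prev = current            # advance prev
        current = next_node       # advance current
    return prev

# Build 1 -> 2 -> 3, reverse it, and read the values back out.
head = _Node(1, _Node(2, _Node(3)))
head = reverse(head)
values = []
while head:
    values.append(head.data)
    head = head.next
assert values == [3, 2, 1]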
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class __magic_name__ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Tuple ,_UpperCAmelCase : float ,_UpperCAmelCase : Callable ,_UpperCAmelCase : int ,_UpperCAmelCase : float = 1.0 ,_UpperCAmelCase : str = None ,):
super().__init__()
_a : List[Any] = initial_learning_rate
_a : Optional[int] = warmup_steps
_a : List[str] = power
_a : Dict = decay_schedule_fn
_a : List[Any] = name
def __call__( self : Optional[int] ,_UpperCAmelCase : Dict ):
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
_a : str = tf.cast(_UpperCAmelCase ,tf.floataa )
_a : List[str] = tf.cast(self.warmup_steps ,tf.floataa )
_a : Tuple = global_step_float / warmup_steps_float
_a : Optional[int] = self.initial_learning_rate * tf.math.pow(warmup_percent_done ,self.power )
return tf.cond(
global_step_float < warmup_steps_float ,lambda: warmup_learning_rate ,lambda: self.decay_schedule_fn(step - self.warmup_steps ) ,name=_UpperCAmelCase ,)
def __lowercase ( self : Any ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 0.9 , lowerCAmelCase_ = 0.999 , lowerCAmelCase_ = 1E-8 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = None , ) -> List[Any]:
_a : int = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=lowerCAmelCase_ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=lowerCAmelCase_ , )
if num_warmup_steps:
_a : Optional[int] = WarmUp(
initial_learning_rate=lowerCAmelCase_ , decay_schedule_fn=lowerCAmelCase_ , warmup_steps=lowerCAmelCase_ , )
if weight_decay_rate > 0.0:
_a : Union[str, Any] = AdamWeightDecay(
learning_rate=lowerCAmelCase_ , weight_decay_rate=lowerCAmelCase_ , beta_1=lowerCAmelCase_ , beta_2=lowerCAmelCase_ , epsilon=lowerCAmelCase_ , clipnorm=lowerCAmelCase_ , global_clipnorm=lowerCAmelCase_ , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=lowerCAmelCase_ , )
else:
_a : List[str] = tf.keras.optimizers.Adam(
learning_rate=lowerCAmelCase_ , beta_1=lowerCAmelCase_ , beta_2=lowerCAmelCase_ , epsilon=lowerCAmelCase_ , clipnorm=lowerCAmelCase_ , global_clipnorm=lowerCAmelCase_ , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : str ,_UpperCAmelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.0_01 ,_UpperCAmelCase : float = 0.9 ,_UpperCAmelCase : float = 0.9_99 ,_UpperCAmelCase : float = 1E-7 ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : float = 0.0 ,_UpperCAmelCase : Optional[List[str]] = None ,_UpperCAmelCase : Optional[List[str]] = None ,_UpperCAmelCase : str = "AdamWeightDecay" ,**_UpperCAmelCase : int ,):
super().__init__(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase )
_a : List[Any] = weight_decay_rate
_a : int = include_in_weight_decay
_a : str = exclude_from_weight_decay
@classmethod
def __lowercase ( cls : Any ,_UpperCAmelCase : int ):
_a : int = {'WarmUp': WarmUp}
return super(_UpperCAmelCase ,cls ).from_config(_UpperCAmelCase ,custom_objects=_UpperCAmelCase )
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Union[str, Any] ):
super(_UpperCAmelCase ,self )._prepare_local(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
_a : str = tf.constant(
self.weight_decay_rate ,name='adam_weight_decay_rate' )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str] ):
_a : Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] ,use_locking=self._use_locking ,)
return tf.no_op()
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : List[str]=None ,**_UpperCAmelCase : Optional[int] ):
_a , _a : int = list(zip(*_UpperCAmelCase ) )
return super(_UpperCAmelCase ,self ).apply_gradients(zip(_UpperCAmelCase ,_UpperCAmelCase ) ,name=_UpperCAmelCase ,**_UpperCAmelCase )
def __lowercase ( self : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Union[str, Any] ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
_a : Union[str, Any] = apply_state or {}
_a : Union[str, Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
_a : Any = self._fallback_apply_state(_UpperCAmelCase ,_UpperCAmelCase )
_a : Any = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def __lowercase ( self : Any ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : int=None ):
_a , _a : str = self._get_lr(var.device ,var.dtype.base_dtype ,_UpperCAmelCase )
_a : Any = self._decay_weights_op(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(_UpperCAmelCase ,self )._resource_apply_dense(_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase )
def __lowercase ( self : Tuple ,_UpperCAmelCase : int ,_UpperCAmelCase : str ,_UpperCAmelCase : Dict ,_UpperCAmelCase : List[Any]=None ):
_a , _a : Tuple = self._get_lr(var.device ,var.dtype.base_dtype ,_UpperCAmelCase )
_a : Tuple = self._decay_weights_op(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(_UpperCAmelCase ,self )._resource_apply_sparse(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : List[Any] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def __lowercase ( self : List[Any] ,_UpperCAmelCase : Dict ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(_UpperCAmelCase ,_UpperCAmelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(_UpperCAmelCase ,_UpperCAmelCase ) is not None:
return False
return True
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : List[str] ):
_a : Optional[Any] = []
_a : Any = None
@property
def __lowercase ( self : int ):
if self._accum_steps is None:
_a : List[str] = tf.Variable(
tf.constant(0 ,dtype=tf.intaa ) ,trainable=_UpperCAmelCase ,synchronization=tf.VariableSynchronization.ON_READ ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,)
return self._accum_steps.value()
@property
def __lowercase ( self : str ):
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : int ,_UpperCAmelCase : Union[str, Any] ):
if not self._gradients:
_a : Dict = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(_UpperCAmelCase ) ,trainable=_UpperCAmelCase ,synchronization=tf.VariableSynchronization.ON_READ ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,)
if gradient is not None
else gradient
for gradient in gradients
] )
if len(_UpperCAmelCase ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(_UpperCAmelCase )}""" )
for accum_gradient, gradient in zip(self._gradients ,_UpperCAmelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(_UpperCAmelCase )
self._accum_steps.assign_add(1 )
def __lowercase ( self : List[str] ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(_UpperCAmelCase ) )
| 89 |
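The warmup schedule above scales the base rate by (step / warmup_steps) ** power and hands off to a decay schedule afterwards. A pure-Python sketch of the combined curve, assuming linear warmup and the polynomial decay the factory builds (all constants below are illustrative):

import math

def lr_at_step(step, init_lr=1e-4, warmup_steps=100, total_steps=1000,
               end_lr=0.0, decay_power=1.0):
    """Linear warmup to init_lr, then polynomial decay to end_lr."""
    if step < warmup_steps:
        return init_lr * step / warmup_steps
    decay_steps = total_steps - warmup_steps
    progress = min((step - warmup_steps) / decay_steps, 1.0)
    return (init_lr - end_lr) * (1.0 - progress) ** decay_power + end_lr

assert math.isclose(lr_at_step(50), 5e-5)   # halfway through warmup
assert math.isclose(lr_at_step(100), 1e-4)  # peak at the end of warmup
assert lr_at_step(1000) == 0.0              # fully decayed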
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : str ,_UpperCAmelCase : Path ,_UpperCAmelCase : Union[str, None] = None ,_UpperCAmelCase : Union[List[str], None] = None ,_UpperCAmelCase : Union[str, List[str], None] = None ,_UpperCAmelCase : bool = True ,):
_a : Dict = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )]
if identifier is not None:
_a : str = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
for n_ in n_identifier:
_a : int = [file for file in files if n_ not in file]
else:
_a : Optional[Any] = [file for file in files if n_identifier not in file]
_a : Dict = ignore_files or []
ignore_files.append('__init__.py' )
_a : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' ,_UpperCAmelCase )
if only_modules:
_a : Any = file.split('.' )[0]
try:
_a : Optional[int] = getattr(_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict = doctest.DocTestSuite(_UpperCAmelCase )
_a : Optional[int] = unittest.TextTestRunner().run(_UpperCAmelCase )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
_a : str = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def __lowercase ( self : Union[str, Any] ):
_a : Optional[Any] = Path('src/transformers' )
_a : Optional[Any] = 'modeling'
_a : Union[str, Any] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase ,ignore_files=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : str = Path('src/transformers' )
_a : List[str] = 'tokenization'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Any = Path('src/transformers' )
_a : str = 'configuration'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : Tuple = Path('src/transformers' )
_a : Optional[int] = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(_UpperCAmelCase ,n_identifier=_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
_a : Union[str, Any] = Path('docs/source' )
_a : List[str] = ['favicon.ico']
self.analyze_directory(_UpperCAmelCase ,ignore_files=_UpperCAmelCase ,only_modules=_UpperCAmelCase )
| 89 | 1 |
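The suite above runs doctests programmatically per file via DocTestSuite and testfile; the simplest equivalent is doctest.testmod over a docstring in the current module. A minimal runnable sketch:

import doctest

def add(a, b):
    """Return a + b.

    >>> add(2, 2)
    4
    """
    return a + b

if __name__ == "__main__":
    # Collect and run every doctest in this module, as the suite above
    # does per file with DocTestSuite / TextTestRunner.
    results = doctest.testmod(verbose=False)
    assert results.failed == 0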
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = LayoutLMTokenizer
lowerCAmelCase : Tuple = LayoutLMTokenizerFast
lowerCAmelCase : List[Any] = True
lowerCAmelCase : int = True
def __lowercase ( self : Dict ):
super().setUp()
_a : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_a : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase ( self : Dict ,**_UpperCAmelCase : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Tuple ):
_a : Optional[int] = 'UNwant\u00E9d,running'
_a : List[Any] = 'unwanted, running'
return input_text, output_text
def __lowercase ( self : Optional[int] ):
_a : Optional[Any] = self.tokenizer_class(self.vocab_file )
_a : Optional[Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) ,[7, 4, 5, 10, 8, 9] )
def __lowercase ( self : Optional[int] ):
pass
| 89 |
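The expected splits in the test above ('un', '##want', '##ed') come from WordPiece's greedy longest-match-first rule. A simplified sketch of that matching loop (no lowercasing, punctuation handling, or max-length cutoff):

def wordpiece(word, vocab, unk="[UNK]"):
    """Greedy longest-match-first WordPiece split of a single word."""
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        piece = None
        while start < end:
            candidate = word[start:end]
            if start > 0:
                candidate = "##" + candidate  # continuation pieces carry ##
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk]  # no prefix matched: the whole word is unknown
        pieces.append(piece)
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece("unwanted", vocab) == ["un", "##want", "##ed"]
assert wordpiece("running", vocab) == ["runn", "##ing"]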
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
__lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[Any]:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_a : List[Any] = model_type_to_module_name(lowerCAmelCase_ )
_a : Optional[Any] = importlib.import_module(f""".{module_name}""" , 'transformers.models' )
try:
return getattr(lowerCAmelCase_ , lowerCAmelCase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowerCAmelCase_ , '__name__' , lowerCAmelCase_ ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_a : Dict = importlib.import_module('transformers' )
if hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
return getattr(lowerCAmelCase_ , lowerCAmelCase_ )
return None
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , **lowerCAmelCase_ , ) -> Tuple:
_a : List[str] = get_file_from_repo(
lowerCAmelCase_ , lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(lowerCAmelCase_ , encoding='utf-8' ) as reader:
return json.load(lowerCAmelCase_ )
class __magic_name__ :
def __init__( self : List[str] ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(_UpperCAmelCase )
def __lowercase ( cls : Dict ,_UpperCAmelCase : Union[str, Any] ,**_UpperCAmelCase : Optional[Any] ):
_a : Any = kwargs.pop('config' ,_UpperCAmelCase )
_a : Dict = kwargs.pop('trust_remote_code' ,_UpperCAmelCase )
_a : Any = True
_a , _a : Tuple = ImageProcessingMixin.get_image_processor_dict(_UpperCAmelCase ,**_UpperCAmelCase )
_a : List[Any] = config_dict.get('image_processor_type' ,_UpperCAmelCase )
_a : int = None
if "AutoImageProcessor" in config_dict.get('auto_map' ,{} ):
_a : Any = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_a : List[Any] = config_dict.pop('feature_extractor_type' ,_UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
_a : Optional[int] = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ):
_a : List[Any] = config_dict['auto_map']['AutoFeatureExtractor']
_a : List[str] = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Dict = AutoConfig.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
# It could be in `config.image_processor_type``
_a : Optional[int] = getattr(_UpperCAmelCase ,'image_processor_type' ,_UpperCAmelCase )
if hasattr(_UpperCAmelCase ,'auto_map' ) and "AutoImageProcessor" in config.auto_map:
_a : Union[str, Any] = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
_a : Optional[int] = image_processor_class_from_name(_UpperCAmelCase )
_a : List[str] = image_processor_auto_map is not None
_a : Optional[int] = image_processor_class is not None or type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
_a : Optional[int] = resolve_trust_remote_code(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
if has_remote_code and trust_remote_code:
_a : Dict = get_class_from_dynamic_module(
_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase )
_a : int = kwargs.pop('code_revision' ,_UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
_a : Dict = IMAGE_PROCESSOR_MAPPING[type(_UpperCAmelCase )]
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def __lowercase ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Dict ):
IMAGE_PROCESSOR_MAPPING.register(_UpperCAmelCase ,_UpperCAmelCase )
| 89 | 1 |
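The lookup above reduces to: resolve a short key through an ordered mapping, import the module expected to define the class, and fall back if the attribute is missing. A minimal importlib sketch of that pattern against standard-library modules (the registry below is illustrative):

import importlib
from collections import OrderedDict

# Hypothetical key -> (module name, class name) registry, mirroring the
# OrderedDict of model types above.
REGISTRY = OrderedDict(
    [
        ("decimal", ("decimal", "Decimal")),
        ("fraction", ("fractions", "Fraction")),
    ]
)

def class_from_name(key):
    module_name, class_name = REGISTRY[key]
    module = importlib.import_module(module_name)
    try:
        return getattr(module, class_name)
    except AttributeError:
        return None  # the resolver above keeps scanning other registries instead

assert class_from_name("fraction")(1, 3).denominator == 3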
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
__lowerCAmelCase = namedtuple('''covid_data''', '''cases deaths recovered''')
def __lowerCamelCase ( lowerCAmelCase_ = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
_a : Optional[int] = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(lowerCAmelCase_ ).content ).xpath(lowerCAmelCase_ ) )
__lowerCAmelCase = '''Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}'''
print(fmt.format(*covid_stats()))
| 89 |
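The scraper above is one XPath pull over a live page; the same extraction works offline. A sketch against inline markup so it runs without network access, assuming lxml is installed (the HTML below is illustrative, not the site's real layout):

from collections import namedtuple

from lxml import html

covid_data = namedtuple("covid_data", "cases deaths recovered")

page = html.fromstring(
    "<html><body>"
    '<div class="maincounter-number"><span>1,000</span></div>'
    '<div class="maincounter-number"><span>10</span></div>'
    '<div class="maincounter-number"><span>900</span></div>'
    "</body></html>"
)
xpath_str = '//div[@class = "maincounter-number"]/span/text()'
stats = covid_data(*page.xpath(xpath_str))
assert stats.deaths == "10"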
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowerCAmelCase = None
__lowerCAmelCase = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
__lowerCAmelCase = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class __magic_name__ :
lowerCAmelCase : bool = True
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "PIL.Image.Image"
lowerCAmelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase : str = field(default='Image' , init=_UpperCamelCase , repr=_UpperCamelCase )
def __call__( self : Union[str, Any] ):
return self.pa_type
def __lowercase ( self : Any ,_UpperCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Optional[Any] = np.array(_UpperCAmelCase )
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(_UpperCAmelCase ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_UpperCAmelCase )
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : dict ,_UpperCAmelCase : Optional[int]=None ):
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.' )
if token_per_repo_id is None:
_a : Dict = {}
_a , _a : str = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(_UpperCAmelCase ):
_a : Any = PIL.Image.open(_UpperCAmelCase )
else:
_a : List[Any] = path.split('::' )[-1]
try:
_a : str = string_to_dict(_UpperCAmelCase ,config.HUB_DATASETS_URL )['repo_id']
_a : Optional[Any] = token_per_repo_id.get(_UpperCAmelCase )
except ValueError:
_a : int = None
with xopen(_UpperCAmelCase ,'rb' ,use_auth_token=_UpperCAmelCase ) as f:
_a : Tuple = BytesIO(f.read() )
_a : Union[str, Any] = PIL.Image.open(bytes_ )
else:
_a : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __lowercase ( self : int ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def __lowercase ( self : str ,_UpperCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
_a : Union[str, Any] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
_a : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_a : List[str] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Any = pa.StructArray.from_arrays([storage, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
_a : Union[str, Any] = storage.field('bytes' )
else:
_a : Tuple = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
_a : Union[str, Any] = storage.field('path' )
else:
_a : Dict = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
_a : List[str] = pa.array(
[encode_np_array(np.array(arr ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
_a : int = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Optional[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase ,self.pa_type )
def __lowercase ( self : Dict ,_UpperCAmelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(_UpperCAmelCase : Tuple ):
with xopen(_UpperCAmelCase ,'rb' ) as f:
_a : int = f.read()
return bytes_
_a : Any = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
_a : Optional[Any] = pa.array(
[os.path.basename(_UpperCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] ,type=pa.string() ,)
_a : Dict = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase ,self.pa_type )
def __lowerCamelCase ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_a : Dict = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __lowerCamelCase ( lowerCAmelCase_ ) -> bytes:
_a : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
_a : Optional[Any] = image.format
else:
_a : str = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(lowerCAmelCase_ , format=lowerCAmelCase_ )
return buffer.getvalue()
def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
if hasattr(lowerCAmelCase_ , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
_a : List[Any] = array.dtype
_a : Optional[int] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
_a : Union[str, Any] = dtype.kind
_a : Union[str, Any] = dtype.itemsize
_a : List[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
_a : Optional[int] = np.dtype('|u1' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
_a : Union[str, Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
_a : str = dtype_byteorder + dtype_kind + str(lowerCAmelCase_ )
_a : List[Any] = np.dtype(lowerCAmelCase_ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
_a : Union[str, Any] = PIL.Image.fromarray(array.astype(lowerCAmelCase_ ) )
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def __lowerCamelCase ( lowerCAmelCase_ ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if objs:
_a , _a : Optional[Any] = first_non_null_value(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowerCAmelCase_ , np.ndarray ):
_a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
elif isinstance(lowerCAmelCase_ , PIL.Image.Image ):
_a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
else:
return objs
else:
return objs
| 89 | 1 |
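The encode/decode paths above round-trip arrays through image bytes. A minimal sketch of that round trip, assuming Pillow and NumPy are installed:

from io import BytesIO

import numpy as np
import PIL.Image

# An 8-bit grayscale array: np.dtype("|u1") is the multi-channel-safe dtype above.
array = np.arange(64, dtype=np.uint8).reshape(8, 8)

# Encode: array -> PIL image -> PNG bytes (mirrors image_to_bytes above).
buffer = BytesIO()
PIL.Image.fromarray(array).save(buffer, format="PNG")
png_bytes = buffer.getvalue()

# Decode: bytes -> PIL image -> array, as in the decode path above.
decoded = np.array(PIL.Image.open(BytesIO(png_bytes)))
assert np.array_equal(decoded, array)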
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_a : List[Any] = tmp_path / 'cache'
_a : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a : List[str] = SqlDatasetReader(
'dataset' , 'sqlite:///' + sqlite_path , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ ).read()
_check_sql_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
@require_sqlalchemy
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_a : Union[str, Any] = tmp_path / 'cache'
_a : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_a : Tuple = features.copy() if features else default_expected_features
_a : Union[str, Any] = (
Features({feature: Value(lowerCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_a : Tuple = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ ).read()
_check_sql_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[Any]:
with contextlib.closing(sqlite3.connect(lowerCAmelCase_ ) ) as con:
_a : List[str] = con.cursor()
cur.execute('SELECT * FROM dataset' )
for row in cur:
yield row
@require_sqlalchemy
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_a : int = tmp_path / 'cache'
_a : Union[str, Any] = os.path.join(lowerCAmelCase_ , 'tmp.sql' )
_a : List[str] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=lowerCAmelCase_ ).read()
SqlDatasetWriter(lowerCAmelCase_ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=1 ).write()
_a : Union[str, Any] = iter_sql_file(lowerCAmelCase_ )
_a : int = iter_sql_file(lowerCAmelCase_ )
for rowa, rowa in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
assert rowa == rowa
@require_sqlalchemy
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_a : Optional[int] = tmp_path / 'cache'
_a : int = os.path.join(lowerCAmelCase_ , 'tmp.sql' )
_a : str = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=lowerCAmelCase_ ).read()
SqlDatasetWriter(lowerCAmelCase_ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=2 ).write()
_a : str = iter_sql_file(lowerCAmelCase_ )
_a : List[str] = iter_sql_file(lowerCAmelCase_ )
for rowa, rowa in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
assert rowa == rowa
@require_sqlalchemy
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
_a : Optional[int] = tmp_path / 'cache'
_a : Any = os.path.join(lowerCAmelCase_ , 'tmp.sql' )
_a : List[str] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=lowerCAmelCase_ ).read()
with pytest.raises(lowerCAmelCase_ ):
SqlDatasetWriter(lowerCAmelCase_ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=0 ).write()
| 89 |
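The fixtures above presuppose an SQLite file holding a table named dataset with columns col_1, col_2, col_3. A standard-library sketch that builds such a file and reads it back the way iter_sql_file does:

import contextlib
import os
import sqlite3
import tempfile

rows = [("a", 1, 1.0), ("b", 2, 2.0), ("c", 3, 3.0), ("d", 4, 4.0)]

with tempfile.TemporaryDirectory() as tmp_dir:
    path = os.path.join(tmp_dir, "tmp.sql")
    # Create the table the fixtures expect and fill it with four rows.
    with contextlib.closing(sqlite3.connect(path)) as con:
        con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
        con.executemany("INSERT INTO dataset VALUES (?, ?, ?)", rows)
        con.commit()
    # Read it back row by row, as iter_sql_file does above.
    with contextlib.closing(sqlite3.connect(path)) as con:
        read_back = list(con.execute("SELECT * FROM dataset"))

assert read_back == rows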
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str | Literal[False]:
_a : Optional[int] = list(lowerCAmelCase_ )
_a : Optional[Any] = list(lowerCAmelCase_ )
_a : Union[str, Any] = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count += 1
_a : Optional[int] = '_'
if count > 1:
return False
else:
return "".join(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ ) -> list[str]:
_a : Optional[int] = []
while True:
_a : Any = ['$'] * len(lowerCAmelCase_ )
_a : List[str] = []
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
_a : Optional[int] = compare_string(binary[i] , binary[j] )
if k is False:
_a : Optional[Any] = '*'
_a : Optional[Any] = '*'
temp.append('X' )
for i in range(len(lowerCAmelCase_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(lowerCAmelCase_ ) == 0:
return pi
_a : Any = list(set(lowerCAmelCase_ ) )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : int = []
for minterm in minterms:
_a : Optional[int] = ''
for _ in range(lowerCAmelCase_ ):
_a : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(lowerCAmelCase_ )
return temp
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> bool:
_a : int = list(lowerCAmelCase_ )
_a : Union[str, Any] = list(lowerCAmelCase_ )
_a : str = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : List[Any] = []
_a : Optional[Any] = [0] * len(lowerCAmelCase_ )
for i in range(len(chart[0] ) ):
_a : Union[str, Any] = 0
_a : int = -1
for j in range(len(lowerCAmelCase_ ) ):
if chart[j][i] == 1:
count += 1
_a : int = j
if count == 1:
_a : List[Any] = 1
for i in range(len(lowerCAmelCase_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(lowerCAmelCase_ ) ):
_a : Any = 0
temp.append(prime_implicants[i] )
while True:
_a : Union[str, Any] = 0
_a : List[Any] = -1
_a : str = 0
for i in range(len(lowerCAmelCase_ ) ):
_a : Union[str, Any] = chart[i].count(1 )
if count_n > max_n:
_a : Any = count_n
_a : int = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(lowerCAmelCase_ ) ):
_a : List[str] = 0
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[list[int]]:
_a : int = [[0 for x in range(len(lowerCAmelCase_ ) )] for x in range(len(lowerCAmelCase_ ) )]
for i in range(len(lowerCAmelCase_ ) ):
_a : str = prime_implicants[i].count('_' )
for j in range(len(lowerCAmelCase_ ) ):
if is_for_table(prime_implicants[i] , binary[j] , lowerCAmelCase_ ):
_a : Optional[Any] = 1
return chart
def __lowerCamelCase ( ) -> None:
_a : Optional[int] = int(input('Enter the no. of variables\n' ) )
_a : List[Any] = [
float(x )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_a : List[str] = decimal_to_binary(lowerCAmelCase_ , lowerCAmelCase_ )
_a : Dict = check(lowerCAmelCase_ )
print('Prime Implicants are:' )
print(lowerCAmelCase_ )
_a : List[Any] = prime_implicant_chart(lowerCAmelCase_ , lowerCAmelCase_ )
_a : int = selection(lowerCAmelCase_ , lowerCAmelCase_ )
print('Essential Prime Implicants are:' )
print(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 89 | 1 |
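compare_string above merges two minterms that differ in exactly one bit, writing '_' at the differing position. A standalone sketch of that single Quine-McCluskey merge step:

def merge(a, b):
    """Merge two equal-length bit strings differing in exactly one position."""
    diffs = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diffs) != 1:
        return None  # the function above records this case as False
    i = diffs[0]
    return a[:i] + "_" + a[i + 1 :]

# Minterms 5 (101) and 7 (111) differ only in the middle bit and combine
# into the implicant 1_1, i.e. A AND C for f(A, B, C).
assert merge("101", "111") == "1_1"
# Minterms 1 (001) and 6 (110) differ in three positions: no merge.
assert merge("001", "110") is None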
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : int ,_UpperCAmelCase : Optional[NestedDataStructureLike[PathLike]] = None ,_UpperCAmelCase : Optional[NamedSplit] = None ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Union[str, Any] ,):
_a : int = path_or_paths
_a : List[Any] = split if split or isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else 'train'
_a : Tuple = features
_a : Optional[Any] = cache_dir
_a : List[str] = keep_in_memory
_a : Tuple = streaming
_a : str = num_proc
_a : int = kwargs
@abstractmethod
def __lowercase ( self : Tuple ):
pass
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : Optional[Any] ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Optional[int] ,):
_a : Union[str, Any] = features
_a : List[str] = cache_dir
_a : Optional[Any] = keep_in_memory
_a : int = streaming
_a : int = num_proc
_a : List[str] = kwargs
@abstractmethod
def __lowercase ( self : List[Any] ):
pass
| 89 |
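The two abstract bases above only capture constructor state and require one read-style method. A minimal sketch of the same pattern with abc from the standard library (the file path and class names are illustrative):

from abc import ABC, abstractmethod
from typing import List

class AbstractReader(ABC):
    """Base class holding reader options; subclasses must implement read()."""

    def __init__(self, path: str, keep_in_memory: bool = False):
        self.path = path
        self.keep_in_memory = keep_in_memory

    @abstractmethod
    def read(self) -> List[str]:
        ...

class LinesReader(AbstractReader):
    def read(self) -> List[str]:
        with open(self.path, encoding="utf-8") as f:
            return f.read().splitlines()

# The base class cannot be instantiated; the concrete subclass can.
reader = LinesReader("data.txt")  # "data.txt" is a placeholder path
assert isinstance(reader, AbstractReader)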
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 1 |
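The _LazyModule swap above defers heavy imports until first attribute access. Plain Python offers the same effect through module-level __getattr__ (PEP 562); a runnable sketch that lazily resolves attributes from other modules (the attribute map is illustrative):

import importlib
import sys

_lazy = {"sqrt": "math", "dumps": "json"}  # attribute -> defining module

def __getattr__(name):  # PEP 562 module-level __getattr__
    if name in _lazy:
        module = importlib.import_module(_lazy[name])
        value = getattr(module, name)
        globals()[name] = value  # cache so later lookups skip this hook
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# Attribute access on this module triggers the lazy import on first use.
this_module = sys.modules[__name__]
assert this_module.sqrt(9) == 3.0
assert "sqrt" in globals()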
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCAmelCase = '''true'''
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=82 , lowerCAmelCase_=16 ) -> Union[str, Any]:
set_seed(42 )
_a : Union[str, Any] = RegressionModel()
_a : Dict = deepcopy(lowerCAmelCase_ )
_a : Any = RegressionDataset(length=lowerCAmelCase_ )
_a : List[str] = DataLoader(lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
model.to(accelerator.device )
_a , _a : int = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ )
return model, ddp_model, dataloader
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False ) -> Optional[int]:
_a : Optional[int] = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
_a : Dict = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(lowerCAmelCase_ ):
_a : Optional[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
with accelerator.main_process_first():
_a : int = dataset.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
_a : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCAmelCase_ ):
if use_longest:
return tokenizer.pad(lowerCAmelCase_ , padding='longest' , return_tensors='pt' )
return tokenizer.pad(lowerCAmelCase_ , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(lowerCAmelCase_ , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=16 )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_a : Optional[int] = Accelerator(dispatch_batches=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
_a : Optional[Any] = get_dataloader(lowerCAmelCase_ , not dispatch_batches )
_a : List[str] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=lowerCAmelCase_ )
_a , _a : Optional[int] = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_a : Union[str, Any] = []
for batch in dataloader:
_a , _a : Any = batch.values()
with torch.no_grad():
_a : str = model(lowerCAmelCase_ )
_a , _a : Optional[int] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
_a , _a : Dict = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCAmelCase_ )
targs.append(lowerCAmelCase_ )
_a , _a : Dict = torch.cat(lowerCAmelCase_ ), torch.cat(lowerCAmelCase_ )
return logits, targs
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=82 , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=16 ) -> List[Any]:
_a , _a , _a : int = get_basic_setup(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_a , _a : Union[str, Any] = generate_predictions(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
assert (
len(lowerCAmelCase_ ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCAmelCase_ )}"""
def __lowerCamelCase ( lowerCAmelCase_ = False , lowerCAmelCase_ = False ) -> List[Any]:
_a : Optional[Any] = evaluate.load('glue' , 'mrpc' )
_a , _a : Any = get_mrpc_setup(lowerCAmelCase_ , lowerCAmelCase_ )
# First do baseline
_a , _a , _a : str = setup['no']
model.to(lowerCAmelCase_ )
model.eval()
for batch in dataloader:
batch.to(lowerCAmelCase_ )
with torch.inference_mode():
_a : List[Any] = model(**lowerCAmelCase_ )
_a : Optional[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowerCAmelCase_ , references=batch['labels'] )
_a : Any = metric.compute()
# Then do distributed
_a , _a , _a : int = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
_a : List[str] = model(**lowerCAmelCase_ )
_a : int = outputs.logits.argmax(dim=-1 )
_a : Optional[int] = batch['labels']
_a , _a : Dict = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )
_a : Any = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def __lowerCamelCase ( ) -> str:
_a : Optional[int] = Accelerator(split_batches=lowerCAmelCase_ , dispatch_batches=lowerCAmelCase_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(lowerCAmelCase_ , lowerCAmelCase_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
_a : Optional[Any] = Accelerator(split_batches=lowerCAmelCase_ , dispatch_batches=lowerCAmelCase_ )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(lowerCAmelCase_ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
_a : int = Accelerator()
test_torch_metrics(lowerCAmelCase_ , 512 )
accelerator.state._reset_state()
def __lowerCamelCase ( lowerCAmelCase_ ) -> int:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 89 |
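The test row above exercises `Accelerator.gather_for_metrics`, whose purpose is to undo the padding that distributed samplers add so every process receives the same number of batches: after gathering predictions across processes, the duplicated tail samples are dropped so metrics see exactly `len(dataset)` examples. A single-process sketch of the idea (hypothetical helper name, and ignoring the per-batch interleaving a real all-gather performs):

def gather_for_metrics_sketch(per_rank_outputs, dataset_length):
    # per_rank_outputs: one list of predictions per process, in rank order
    gathered = [item for rank in per_rank_outputs for item in rank]
    # distributed samplers pad by wrapping around; drop the padded tail
    return gathered[:dataset_length]

# 2 processes, 10 real samples padded to 6 per rank (12 gathered in total)
rank0 = [0, 1, 2, 3, 4, 5]
rank1 = [6, 7, 8, 9, 0, 1]  # the last two entries are padding duplicates
assert gather_for_metrics_sketch([rank0, rank1], dataset_length=10) == list(range(10))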
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = LayoutLMTokenizer
lowerCAmelCase : Tuple = LayoutLMTokenizerFast
lowerCAmelCase : List[Any] = True
lowerCAmelCase : int = True
def __lowercase ( self : Dict ):
super().setUp()
_a : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_a : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase ( self : Dict ,**_UpperCAmelCase : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Tuple ):
_a : Optional[int] = 'UNwant\u00E9d,running'
_a : List[Any] = 'unwanted, running'
return input_text, output_text
def __lowercase ( self : Optional[int] ):
_a : Optional[Any] = self.tokenizer_class(self.vocab_file )
_a : Optional[Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) ,[7, 4, 5, 10, 8, 9] )
def __lowercase ( self : Optional[int] ):
pass
| 89 | 1 |
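The expected output in the test above ('unwanted' -> ['un', '##want', '##ed']) comes from greedy longest-match-first WordPiece over the toy vocabulary, where `##` marks word-internal pieces. A minimal sketch of that algorithm reproduces the split:

def wordpiece(word, vocab, unk="[UNK]"):
    # greedy longest-match-first; "##" prefixes continuation pieces
    pieces, start = [], 0
    while start < len(word):
        end, match = len(word), None
        while start < end:
            sub = word[start:end]
            if start > 0:
                sub = "##" + sub
            if sub in vocab:
                match = sub
                break
            end -= 1
        if match is None:
            return [unk]
        pieces.append(match)
        start = end
    return pieces

vocab = {"[UNK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"}
assert wordpiece("unwanted", vocab) == ["un", "##want", "##ed"]
assert wordpiece("running", vocab) == ["runn", "##ing"]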
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : str = 'cvt'
def __init__( self : Union[str, Any] ,_UpperCAmelCase : Optional[Any]=3 ,_UpperCAmelCase : List[str]=[7, 3, 3] ,_UpperCAmelCase : List[str]=[4, 2, 2] ,_UpperCAmelCase : Optional[Any]=[2, 1, 1] ,_UpperCAmelCase : List[str]=[64, 192, 384] ,_UpperCAmelCase : Tuple=[1, 3, 6] ,_UpperCAmelCase : Optional[Any]=[1, 2, 10] ,_UpperCAmelCase : Optional[int]=[4.0, 4.0, 4.0] ,_UpperCAmelCase : Optional[Any]=[0.0, 0.0, 0.0] ,_UpperCAmelCase : List[str]=[0.0, 0.0, 0.0] ,_UpperCAmelCase : List[str]=[0.0, 0.0, 0.1] ,_UpperCAmelCase : Dict=[True, True, True] ,_UpperCAmelCase : Dict=[False, False, True] ,_UpperCAmelCase : str=["dw_bn", "dw_bn", "dw_bn"] ,_UpperCAmelCase : int=[3, 3, 3] ,_UpperCAmelCase : Optional[int]=[1, 1, 1] ,_UpperCAmelCase : List[str]=[2, 2, 2] ,_UpperCAmelCase : List[str]=[1, 1, 1] ,_UpperCAmelCase : Optional[Any]=[1, 1, 1] ,_UpperCAmelCase : Union[str, Any]=0.02 ,_UpperCAmelCase : int=1E-12 ,**_UpperCAmelCase : Union[str, Any] ,):
super().__init__(**_UpperCAmelCase )
_a : Any = num_channels
_a : List[Any] = patch_sizes
_a : int = patch_stride
_a : List[Any] = patch_padding
_a : Any = embed_dim
_a : List[str] = num_heads
_a : Tuple = depth
_a : int = mlp_ratio
_a : List[Any] = attention_drop_rate
_a : Optional[int] = drop_rate
_a : str = drop_path_rate
_a : Tuple = qkv_bias
_a : str = cls_token
_a : Optional[int] = qkv_projection_method
_a : Any = kernel_qkv
_a : Any = padding_kv
_a : str = stride_kv
_a : Tuple = padding_q
_a : List[str] = stride_q
_a : List[str] = initializer_range
_a : Dict = layer_norm_eps
| 89 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Any = 'conditional_detr'
lowerCAmelCase : List[str] = ['past_key_values']
lowerCAmelCase : Optional[int] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[int] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[Any]=3 ,_UpperCAmelCase : List[Any]=300 ,_UpperCAmelCase : Dict=6 ,_UpperCAmelCase : List[str]=2048 ,_UpperCAmelCase : Optional[int]=8 ,_UpperCAmelCase : List[Any]=6 ,_UpperCAmelCase : Optional[int]=2048 ,_UpperCAmelCase : Dict=8 ,_UpperCAmelCase : int=0.0 ,_UpperCAmelCase : Optional[Any]=0.0 ,_UpperCAmelCase : Optional[Any]=True ,_UpperCAmelCase : str="relu" ,_UpperCAmelCase : Tuple=256 ,_UpperCAmelCase : Optional[int]=0.1 ,_UpperCAmelCase : str=0.0 ,_UpperCAmelCase : Optional[int]=0.0 ,_UpperCAmelCase : Union[str, Any]=0.02 ,_UpperCAmelCase : List[str]=1.0 ,_UpperCAmelCase : Any=False ,_UpperCAmelCase : int="sine" ,_UpperCAmelCase : List[str]="resnet50" ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : str=False ,_UpperCAmelCase : str=2 ,_UpperCAmelCase : int=5 ,_UpperCAmelCase : Optional[int]=2 ,_UpperCAmelCase : str=1 ,_UpperCAmelCase : Union[str, Any]=1 ,_UpperCAmelCase : List[str]=2 ,_UpperCAmelCase : Union[str, Any]=5 ,_UpperCAmelCase : List[Any]=2 ,_UpperCAmelCase : Optional[int]=0.25 ,**_UpperCAmelCase : Tuple ,):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_a : Optional[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : str = backbone_config.get('model_type' )
_a : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
_a : List[Any] = config_class.from_dict(_UpperCAmelCase )
_a : Tuple = use_timm_backbone
_a : Union[str, Any] = backbone_config
_a : List[Any] = num_channels
_a : Union[str, Any] = num_queries
_a : Optional[Any] = d_model
_a : Tuple = encoder_ffn_dim
_a : Dict = encoder_layers
_a : List[str] = encoder_attention_heads
_a : Union[str, Any] = decoder_ffn_dim
_a : Optional[int] = decoder_layers
_a : int = decoder_attention_heads
_a : Optional[int] = dropout
_a : Tuple = attention_dropout
_a : List[Any] = activation_dropout
_a : str = activation_function
_a : Optional[Any] = init_std
_a : Union[str, Any] = init_xavier_std
_a : List[Any] = encoder_layerdrop
_a : List[Any] = decoder_layerdrop
_a : Dict = encoder_layers
_a : List[Any] = auxiliary_loss
_a : Optional[int] = position_embedding_type
_a : List[Any] = backbone
_a : Optional[int] = use_pretrained_backbone
_a : Optional[int] = dilation
# Hungarian matcher
_a : Tuple = class_cost
_a : str = bbox_cost
_a : Any = giou_cost
# Loss coefficients
_a : Tuple = mask_loss_coefficient
_a : Dict = dice_loss_coefficient
_a : Tuple = cls_loss_coefficient
_a : Any = bbox_loss_coefficient
_a : Dict = giou_loss_coefficient
_a : Union[str, Any] = focal_alpha
super().__init__(is_encoder_decoder=_UpperCAmelCase ,**_UpperCAmelCase )
@property
def __lowercase ( self : Dict ):
return self.encoder_attention_heads
@property
def __lowercase ( self : str ):
return self.d_model
def __lowercase ( self : int ):
_a : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_a : Dict = self.backbone_config.to_dict()
_a : Union[str, Any] = self.__class__.model_type
return output
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : str = version.parse('1.11' )
@property
def __lowercase ( self : Dict ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def __lowercase ( self : Any ):
return 1E-5
@property
def __lowercase ( self : List[Any] ):
return 12
| 89 | 1 |
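The config row above declares an attribute map ('hidden_size' -> 'd_model', 'num_attention_heads' -> 'encoder_attention_heads') so generic names resolve to model-specific fields. Roughly how such an alias map can work, as a simplified sketch rather than the actual `PretrainedConfig` implementation (which also handles writes):

class AliasedConfig:
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads"}

    def __init__(self, d_model=256, encoder_attention_heads=8):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads

    def __getattr__(self, name):
        # invoked only when normal attribute lookup fails, so real fields win
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

config = AliasedConfig(d_model=512)
assert config.hidden_size == 512  # the alias resolves to d_model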
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[Any]:
_a : int = 'huggingface/label-files'
_a : Union[str, Any] = 'imagenet-1k-id2label.json'
_a : Optional[int] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='dataset' ) , 'r' ) )
_a : List[Any] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
_a : str = {v: k for k, v in idalabel.items()}
_a : Dict = 'std_conv' if 'bit' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_a : List[Any] = BitConfig(
conv_layer=lowerCAmelCase_ , num_labels=1000 , idalabel=lowerCAmelCase_ , labelaid=lowerCAmelCase_ , )
return config
def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[Any]:
if "stem.conv" in name:
_a : int = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_a : int = name.replace('blocks' , 'layers' )
if "head.fc" in name:
_a : Tuple = name.replace('head.fc' , 'classifier.1' )
if name.startswith('norm' ):
_a : List[Any] = 'bit.' + name
if "bit" not in name and "classifier" not in name:
_a : Optional[int] = 'bit.encoder.' + name
return name
def __lowerCamelCase ( ) -> str:
_a : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_a : List[str] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> Optional[int]:
_a : int = get_config(lowerCAmelCase_ )
# load original model from timm
_a : List[Any] = create_model(lowerCAmelCase_ , pretrained=lowerCAmelCase_ )
timm_model.eval()
# load state_dict of original model
_a : Optional[Any] = timm_model.state_dict()
for key in state_dict.copy().keys():
_a : Optional[int] = state_dict.pop(lowerCAmelCase_ )
_a : Any = val.squeeze() if 'head' in key else val
# load HuggingFace model
_a : str = BitForImageClassification(lowerCAmelCase_ )
model.eval()
model.load_state_dict(lowerCAmelCase_ )
# create image processor
_a : Union[str, Any] = create_transform(**resolve_data_config({} , model=lowerCAmelCase_ ) )
_a : Union[str, Any] = transform.transforms
_a : Any = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
_a : str = BitImageProcessor(
do_resize=lowerCAmelCase_ , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase_ , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
_a : Tuple = prepare_img()
_a : int = transform(lowerCAmelCase_ ).unsqueeze(0 )
_a : List[str] = processor(lowerCAmelCase_ , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ )
# verify logits
with torch.no_grad():
_a : int = model(lowerCAmelCase_ )
_a : Union[str, Any] = outputs.logits
print('Logits:' , logits[0, :3] )
print('Predicted class:' , model.config.idalabel[logits.argmax(-1 ).item()] )
_a : List[Any] = timm_model(lowerCAmelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase_ , outputs.logits , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
__lowerCAmelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 |
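Stripped of model specifics, the BiT conversion script above is one pattern: take the timm state dict, rewrite each key into the Hugging Face naming scheme, squeeze the 1x1 classifier weights, and load the result. A self-contained sketch of that pattern (the rename rules here are illustrative):

import torch

def convert_state_dict(state_dict, rename_key):
    new_state_dict = {}
    for key, value in state_dict.items():
        # the BiT script squeezes 1x1 conv head weights into linear weights
        new_state_dict[rename_key(key)] = value.squeeze() if "head" in key else value
    return new_state_dict

RULES = [("stem.conv", "embedder.convolution"), ("blocks", "layers"), ("head.fc", "classifier.1")]

def rename_key(key):
    for old, new in RULES:
        key = key.replace(old, new)
    return key

sd = {"stem.conv.weight": torch.zeros(64, 3, 7, 7), "head.fc.weight": torch.zeros(10, 64, 1, 1)}
converted = convert_state_dict(sd, rename_key)
assert "embedder.convolution.weight" in converted
assert converted["classifier.1.weight"].shape == (10, 64)  # squeezed from (10, 64, 1, 1)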
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ :
def __init__( self : List[str] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=13 ,_UpperCAmelCase : Any=32 ,_UpperCAmelCase : Union[str, Any]=3 ,_UpperCAmelCase : Optional[int]=4 ,_UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] ,_UpperCAmelCase : Tuple=[2, 2, 3, 2] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=37 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[Any]=10 ,_UpperCAmelCase : Tuple=0.02 ,_UpperCAmelCase : Any=["stage2", "stage3", "stage4"] ,_UpperCAmelCase : Any=[2, 3, 4] ,_UpperCAmelCase : Tuple=None ,):
_a : Optional[Any] = parent
_a : List[Any] = batch_size
_a : str = image_size
_a : Union[str, Any] = num_channels
_a : List[Any] = num_stages
_a : Dict = hidden_sizes
_a : int = depths
_a : Tuple = is_training
_a : List[str] = use_labels
_a : Dict = intermediate_size
_a : int = hidden_act
_a : int = num_labels
_a : Any = initializer_range
_a : Tuple = out_features
_a : int = out_indices
_a : List[Any] = scope
def __lowercase ( self : Dict ):
_a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Union[str, Any] = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.num_labels )
_a : str = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Any ):
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[Any] = ConvNextVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Any = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ):
_a : List[Any] = ConvNextVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase ,labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[int] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Dict = model(_UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_a : Tuple = None
_a : List[Any] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __lowercase ( self : Optional[Any] ):
_a : Any = self.prepare_config_and_inputs()
_a , _a , _a : Union[str, Any] = config_and_inputs
_a : Any = {'pixel_values': pixel_values}
return config, inputs_dict
def __lowercase ( self : str ):
_a : Tuple = self.prepare_config_and_inputs()
_a , _a , _a : Tuple = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase : str = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : int = False
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[str] = False
lowerCAmelCase : Optional[int] = False
def __lowercase ( self : List[Any] ):
_a : str = ConvNextVaModelTester(self )
_a : Tuple = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 )
def __lowercase ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : str ):
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __lowercase ( self : List[Any] ):
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __lowercase ( self : Any ):
pass
def __lowercase ( self : List[str] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Any = True
if model_class.__name__ in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]:
continue
_a : Optional[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_a : str = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : Optional[int] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : str ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Optional[int] = False
_a : Tuple = True
if (
model_class.__name__
in [*get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
_a : Tuple = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
_a : Any = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : List[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : List[Any] ):
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = model_class(_UpperCAmelCase )
_a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Dict = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : Any ):
def check_hidden_states_output(_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ):
_a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : str = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Optional[Any] = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : List[Any] ):
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def __lowercase ( self : int ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = ConvNextVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowerCamelCase ( ) -> List[Any]:
_a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Optional[Any] ):
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def __lowercase ( self : Any ):
_a : List[str] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_UpperCAmelCase )
_a : Optional[int] = self.default_image_processor
_a : str = prepare_img()
_a : str = preprocessor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_a : Dict = model(**_UpperCAmelCase )
# verify the logits
_a : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_UpperCAmelCase )
_a : Optional[Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
| 89 | 1 |
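The hidden-states test above encodes the model contract: with `output_hidden_states=True` a ConvNeXt V2 model returns `num_stages + 1` feature maps, the first at 1/4 of the input resolution (the stem downsamples by 4) and the last at 1/32. A quick check of that contract on a tiny randomly initialized model; the class and kwarg names follow recent `transformers` releases and should be treated as assumptions:

import torch
from transformers import ConvNextV2Config, ConvNextV2Model

config = ConvNextV2Config(hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], num_stages=4)
model = ConvNextV2Model(config).eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 32, 32), output_hidden_states=True)

assert len(out.hidden_states) == config.num_stages + 1
assert out.hidden_states[0].shape[-2:] == (8, 8)      # 32 // 4 after the stem
assert out.last_hidden_state.shape == (1, 40, 1, 1)   # 32 // 32 at the last stage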
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_a : List[str] = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
_a : Optional[Any] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ).convert('RGB' )
_a : Optional[Any] = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ),
] )
_a : List[str] = transform(lowerCAmelCase_ ).unsqueeze(0 ).to(lowerCAmelCase_ )
return image
def __lowerCamelCase ( lowerCAmelCase_ ) -> int:
if "visual_encoder" in key:
_a : List[str] = re.sub('visual_encoder*' , 'vision_model.encoder' , lowerCAmelCase_ )
if "blocks" in key:
_a : int = re.sub(r'blocks' , 'layers' , lowerCAmelCase_ )
if "attn" in key:
_a : Optional[int] = re.sub(r'attn' , 'self_attn' , lowerCAmelCase_ )
if "norm1" in key:
_a : List[str] = re.sub(r'norm1' , 'layer_norm1' , lowerCAmelCase_ )
if "norm2" in key:
_a : str = re.sub(r'norm2' , 'layer_norm2' , lowerCAmelCase_ )
if "encoder.norm" in key:
_a : int = re.sub(r'encoder.norm' , 'post_layernorm' , lowerCAmelCase_ )
if "encoder.patch_embed.proj" in key:
_a : Union[str, Any] = re.sub(r'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowerCAmelCase_ )
if "encoder.pos_embed" in key:
_a : Tuple = re.sub(r'encoder.pos_embed' , 'embeddings.position_embedding' , lowerCAmelCase_ )
if "encoder.cls_token" in key:
_a : Tuple = re.sub(r'encoder.cls_token' , 'embeddings.class_embedding' , lowerCAmelCase_ )
if "self_attn" in key:
_a : Any = re.sub(r'self_attn.proj' , 'self_attn.projection' , lowerCAmelCase_ )
return key
@torch.no_grad()
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=None ) -> int:
if config_path is not None:
_a : Tuple = BlipConfig.from_pretrained(lowerCAmelCase_ )
else:
_a : int = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
_a : Dict = BlipForConditionalGeneration(lowerCAmelCase_ ).eval()
_a : Any = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
_a : List[str] = blip_decoder(pretrained=lowerCAmelCase_ , image_size=384 , vit='base' )
_a : int = pt_model.eval()
_a : Any = pt_model.state_dict()
for key in modified_state_dict.copy():
_a : List[str] = modified_state_dict.pop(lowerCAmelCase_ )
_a : Optional[Any] = rename_key(lowerCAmelCase_ )
_a : Any = value
hf_model.load_state_dict(lowerCAmelCase_ )
_a : Optional[int] = 384
_a : List[str] = load_demo_image(image_size=lowerCAmelCase_ , device='cpu' )
_a : Optional[Any] = BertTokenizer.from_pretrained('bert-base-uncased' )
_a : int = tokenizer(['a picture of'] ).input_ids
_a : List[str] = hf_model.generate(lowerCAmelCase_ , lowerCAmelCase_ )
assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
_a : List[str] = hf_model.generate(lowerCAmelCase_ )
assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCAmelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_a : Tuple = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
_a : Optional[Any] = blip_vqa(pretrained=lowerCAmelCase_ , image_size=lowerCAmelCase_ , vit='base' )
vqa_model.eval()
_a : Optional[int] = vqa_model.state_dict()
for key in modified_state_dict.copy():
_a : int = modified_state_dict.pop(lowerCAmelCase_ )
_a : Optional[int] = rename_key(lowerCAmelCase_ )
_a : Dict = value
_a : Optional[int] = BlipForQuestionAnswering(lowerCAmelCase_ )
hf_vqa_model.load_state_dict(lowerCAmelCase_ )
_a : List[str] = ['How many dogs are in this image?']
_a : str = tokenizer(lowerCAmelCase_ , return_tensors='pt' ).input_ids
_a : Dict = hf_vqa_model.generate(lowerCAmelCase_ , lowerCAmelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
_a : int = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
_a : List[str] = blip_itm(pretrained=lowerCAmelCase_ , image_size=lowerCAmelCase_ , vit='base' )
itm_model.eval()
_a : Any = itm_model.state_dict()
for key in modified_state_dict.copy():
_a : Optional[int] = modified_state_dict.pop(lowerCAmelCase_ )
_a : Any = rename_key(lowerCAmelCase_ )
_a : Dict = value
_a : Tuple = BlipForImageTextRetrieval(lowerCAmelCase_ )
_a : Any = ['A picture of a woman with a dog sitting in a beach']
_a : List[str] = tokenizer(
lowerCAmelCase_ , return_tensors='pt' , padding='max_length' , truncation=lowerCAmelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCAmelCase_ )
hf_itm_model.eval()
_a : str = hf_itm_model(lowerCAmelCase_ , lowerCAmelCase_ , use_itm_head=lowerCAmelCase_ )
_a : Any = hf_itm_model(lowerCAmelCase_ , lowerCAmelCase_ , use_itm_head=lowerCAmelCase_ )
assert out[0].item() == 0.2_110_687_494_277_954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
__lowerCAmelCase = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_lilt'''] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 1 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( _UpperCamelCase ):
@require_torch
def __lowercase ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Optional[int] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : List[str] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Tuple = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : List[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : Any ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Dict = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : Optional[int] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : int = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : str = self.get_env()
_a : Optional[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_a : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_a : str = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_a : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Dict = self.get_env()
_a : int = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
_a : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : int ):
_a : Optional[Any] = '\nfrom transformers import pipeline\n '
_a : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_a : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_a : List[Any] = self.get_env()
_a : Dict = '1'
_a : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_a : str = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def __lowercase ( self : int ):
_a : Optional[int] = '\nfrom transformers import AutoModel\n '
_a : List[Any] = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_a : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Tuple = self.get_env()
_a : List[str] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : Optional[Any] = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
| 89 |
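All of the offline tests above share one trick: launch a fresh interpreter via `subprocess` and, inside it, replace `socket.socket` before `transformers` can make a network call, so any attempted download fails immediately. The core of the trick in isolation:

import subprocess
import sys

program = """
import socket

def offline_socket(*args, **kwargs):
    raise RuntimeError("network disabled for this test")

socket.socket = offline_socket  # monkey-patch before anything opens a connection

import urllib.request
try:
    urllib.request.urlopen("https://example.com")
except Exception as exc:
    print("blocked:", exc)
"""

result = subprocess.run([sys.executable, "-c", program], capture_output=True, text=True)
print(result.stdout)  # -> blocked: network disabled for this test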
'''simple docstring'''
import math
def __lowerCamelCase ( lowerCAmelCase_ ) -> bool:
    _a : Optional[int] = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ = 1 / 12345 ) -> int:
_a : int = 0
_a : Optional[Any] = 0
_a : int = 3
while True:
_a : Tuple = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(lowerCAmelCase_ ):
_a : Union[str, Any] = int(lowerCAmelCase_ )
total_partitions += 1
if check_partition_perfect(lowerCAmelCase_ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(lowerCAmelCase_ )
integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 89 | 1 |
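The checker at the top of the row above tests whether log2(sqrt(4n + 1) / 2 + 1 / 2) is an integer; algebraically that holds exactly when 4n + 1 = (2^(k+1) - 1)^2, i.e. when n = 2^k * (2^k - 1). A small cross-check of the floating-point test against that exact integer form (the derivation is mine, not from the source):

import math

def check_float(n: int) -> bool:
    # floating-point version, as in the snippet above
    exponent = math.log2(math.sqrt(4 * n + 1) / 2 + 1 / 2)
    return exponent == int(exponent)

def check_exact(n: int) -> bool:
    root = math.isqrt(4 * n + 1)
    if root * root != 4 * n + 1:
        return False                # 4n + 1 must be a perfect (odd) square
    half = (root + 1) // 2          # candidate 2**k
    return half & (half - 1) == 0   # ... and that candidate a power of two

for n in range(1, 10_000):
    assert check_float(n) == check_exact(n), n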
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __lowerCamelCase ( lowerCAmelCase_ ) -> list[list[float]]:
_a : Optional[int] = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(lowerCAmelCase_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
_a : List[str] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
_a : List[Any] = [[0.0, 0.0], [0.0, 0.0]]
_a , _a : Dict = matrix[1][1], matrix[0][0]
_a , _a : Optional[int] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(lowerCAmelCase_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(lowerCAmelCase_ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
_a : List[str] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
_a : Any = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
_a : Optional[Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
_a : str = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
_a : str = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
_a : Any = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
_a : Any = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
_a : Dict = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
_a : int = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
_a : Optional[Any] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
_a : Dict = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
_a : Dict = array(lowerCAmelCase_ )
for i in range(3 ):
for j in range(3 ):
_a : int = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
_a : Union[str, Any] = array(lowerCAmelCase_ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(lowerCAmelCase_ )
# Calculate the inverse of the matrix
return [[float(d(lowerCAmelCase_ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
| 89 |
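The routine above inverts 2x2 matrices via adjugate over determinant, and 3x3 matrices via the cofactor matrix with the determinant from the rule of Sarrus. Since the obfuscated names make it hard to follow, here is the 2x2 core restated plainly and cross-checked against `numpy.linalg.inv`:

import numpy as np

def inverse_2x2(m):
    # adjugate / determinant, mirroring the Decimal-based routine above
    det = m[0][0] * m[1][1] - m[0][1] * m[1][0]
    if det == 0:
        raise ValueError("This matrix has no inverse.")
    return [[ m[1][1] / det, -m[0][1] / det],
            [-m[1][0] / det,  m[0][0] / det]]

m = [[4.0, 7.0], [2.0, 6.0]]
assert np.allclose(inverse_2x2(m), np.linalg.inv(np.array(m)))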
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=1 ) -> Dict:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Tuple:
_a : Any = []
for old_item in old_list:
_a : Union[str, Any] = old_item.replace('in_layers.0' , 'norm1' )
_a : Optional[int] = new_item.replace('in_layers.2' , 'conv1' )
_a : str = new_item.replace('out_layers.0' , 'norm2' )
_a : List[str] = new_item.replace('out_layers.3' , 'conv2' )
_a : str = new_item.replace('emb_layers.1' , 'time_emb_proj' )
_a : Tuple = new_item.replace('skip_connection' , 'conv_shortcut' )
_a : Any = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Any:
_a : List[str] = []
for old_item in old_list:
_a : List[Any] = old_item
_a : Optional[int] = new_item.replace('norm.weight' , 'group_norm.weight' )
_a : Optional[Any] = new_item.replace('norm.bias' , 'group_norm.bias' )
_a : Any = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
_a : Optional[Any] = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
_a : Optional[int] = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Any:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_a : Optional[Any] = old_checkpoint[path]
_a : Optional[Any] = old_tensor.shape[0] // 3
_a : Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_a : int = old_tensor.shape[0] // config['num_head_channels'] // 3
_a : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_a , _a , _a : Tuple = old_tensor.split(channels // num_heads , dim=1 )
_a : Dict = query.reshape(lowerCAmelCase_ )
_a : str = key.reshape(lowerCAmelCase_ )
_a : Optional[int] = value.reshape(lowerCAmelCase_ )
for path in paths:
_a : Dict = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_a : Any = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
_a : str = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
_a : Union[str, Any] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
_a : int = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_a : List[str] = old_checkpoint[path['old']][:, :, 0]
else:
_a : Dict = old_checkpoint[path['old']]
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_a : Optional[int] = {}
_a : Dict = checkpoint['time_embed.0.weight']
_a : Tuple = checkpoint['time_embed.0.bias']
_a : Union[str, Any] = checkpoint['time_embed.2.weight']
_a : List[str] = checkpoint['time_embed.2.bias']
_a : List[str] = checkpoint['input_blocks.0.0.weight']
_a : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_a : Optional[int] = checkpoint['out.0.weight']
_a : int = checkpoint['out.0.bias']
_a : List[str] = checkpoint['out.2.weight']
_a : Optional[int] = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
_a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
_a : Dict = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
# Retrieves the keys for the middle blocks only
_a : List[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
_a : Union[str, Any] = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
# Retrieves the keys for the output blocks only
_a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
_a : str = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
for i in range(1 , lowerCAmelCase_ ):
_a : List[Any] = (i - 1) // (config['num_res_blocks'] + 1)
_a : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1)
_a : Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
_a : List[Any] = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
_a : Union[str, Any] = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
_a : List[str] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
_a : Optional[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path, resnet_op] , config=lowerCAmelCase_ )
if len(lowerCAmelCase_ ):
_a : List[str] = renew_attention_paths(lowerCAmelCase_ )
_a : List[Any] = {
'old': f"""input_blocks.{i}.1""",
'new': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : Optional[Any] = {
f"""input_blocks.{i}.1.qkv.bias""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ , )
_a : str = middle_blocks[0]
_a : Tuple = middle_blocks[1]
_a : Any = middle_blocks[2]
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : int = renew_attention_paths(lowerCAmelCase_ )
_a : int = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ )
for i in range(lowerCAmelCase_ ):
_a : List[str] = i // (config['num_res_blocks'] + 1)
_a : Any = i % (config['num_res_blocks'] + 1)
_a : Union[str, Any] = [shave_segments(lowerCAmelCase_ , 2 ) for name in output_blocks[i]]
_a : Optional[Any] = {}
for layer in output_block_layers:
_a , _a : str = layer.split('.' )[0], shave_segments(lowerCAmelCase_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(lowerCAmelCase_ )
else:
_a : str = [layer_name]
if len(lowerCAmelCase_ ) > 1:
_a : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
_a : Dict = renew_resnet_paths(lowerCAmelCase_ )
_a : str = renew_resnet_paths(lowerCAmelCase_ )
_a : Optional[int] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , config=lowerCAmelCase_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_a : List[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
_a : Tuple = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
_a : List[str] = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(lowerCAmelCase_ ) == 2:
_a : Union[str, Any] = []
if len(lowerCAmelCase_ ):
_a : Tuple = renew_attention_paths(lowerCAmelCase_ )
_a : str = {
'old': f"""output_blocks.{i}.1""",
'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : List[Any] = {
f"""output_blocks.{i}.1.qkv.bias""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=lowerCAmelCase_ , )
else:
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_a : int = '.'.join(['output_blocks', str(lowerCAmelCase_ ), path['old']] )
_a : Union[str, Any] = '.'.join(['up_blocks', str(lowerCAmelCase_ ), 'resnets', str(lowerCAmelCase_ ), path['new']] )
_a : Union[str, Any] = checkpoint[old_path]
return new_checkpoint
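# Hypothetical invocation (script name and paths are placeholders, not from the original source):
#   python convert_ldm_checkpoint.py --checkpoint_path model.ckpt --config_file config.json --dump_path out/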
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__lowerCAmelCase = json.loads(f.read())
__lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__lowerCAmelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 89 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> np.array:
_a : Optional[int] = f"""{sampling_rate}"""
_a : Any = '1'
_a : Optional[int] = 'f32le'
_a : Any = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(lowerCAmelCase_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
_a : int = ffmpeg_process.communicate(lowerCAmelCase_ )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
_a : int = output_stream[0]
_a : List[str] = np.frombuffer(lowerCAmelCase_ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
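# Stream raw microphone audio through ffmpeg, yielding chunks of raw audio bytes.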
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = "f32le" , ) -> Union[str, Any]:
_a : List[str] = f"""{sampling_rate}"""
_a : List[str] = '1'
if format_for_conversion == "s16le":
_a : List[Any] = 2
elif format_for_conversion == "f32le":
_a : Dict = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Any = platform.system()
if system == "Linux":
_a : Union[str, Any] = 'alsa'
_a : Union[str, Any] = 'default'
elif system == "Darwin":
_a : Any = 'avfoundation'
_a : Optional[int] = ':0'
elif system == "Windows":
_a : str = 'dshow'
_a : Tuple = 'default'
_a : str = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_a : Union[str, Any] = _ffmpeg_stream(lowerCAmelCase_ , lowerCAmelCase_ )
for item in iterator:
yield item
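# Like the raw microphone stream above, but re-chunked with left/right strides so a
# downstream model can run streaming inference over overlapping context windows.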
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "f32le" , ) -> str:
if stream_chunk_s is not None:
_a : str = stream_chunk_s
else:
_a : List[str] = chunk_length_s
_a : int = ffmpeg_microphone(lowerCAmelCase_ , lowerCAmelCase_ , format_for_conversion=lowerCAmelCase_ )
if format_for_conversion == "s16le":
_a : Optional[Any] = np.intaa
_a : List[Any] = 2
elif format_for_conversion == "f32le":
_a : Tuple = np.floataa
_a : Any = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_a : str = chunk_length_s / 6
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCAmelCase_ , (int, float) ):
_a : List[str] = [stride_length_s, stride_length_s]
_a : str = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_a : List[str] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_a : Any = datetime.datetime.now()
_a : Dict = datetime.timedelta(seconds=lowerCAmelCase_ )
for item in chunk_bytes_iter(lowerCAmelCase_ , lowerCAmelCase_ , stride=(stride_left, stride_right) , stream=lowerCAmelCase_ ):
# Put everything back in numpy scale
_a : List[Any] = np.frombuffer(item['raw'] , dtype=lowerCAmelCase_ )
_a : List[str] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
_a : Union[str, Any] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
            # Running more than 10 chunks behind real time: skip this chunk to catch up.
continue
yield item
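# Re-chunk an iterator of raw bytes into windows of `chunk_len` bytes with the
# requested (left, right) byte strides; with `stream=True`, partial chunks are also yielded.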
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False ) -> List[Any]:
_a : Tuple = B''
_a , _a : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
_a : Optional[int] = 0
for raw in iterator:
acc += raw
if stream and len(lowerCAmelCase_ ) < chunk_len:
_a : str = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCAmelCase_ ) >= chunk_len:
# We are flushing the accumulator
_a : Union[str, Any] = (_stride_left, stride_right)
_a : Dict = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
_a : List[str] = False
yield item
_a : int = stride_left
_a : List[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCAmelCase_ ) > stride_left:
_a : str = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
_a : str = False
yield item
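# Low-level helper: spawn ffmpeg as a subprocess and yield its stdout in buffer-sized reads.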
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
    _a : Optional[Any] = 2**24  # 16 MB read buffer
try:
with subprocess.Popen(lowerCAmelCase_ , stdout=subprocess.PIPE , bufsize=lowerCAmelCase_ ) as ffmpeg_process:
while True:
_a : Any = ffmpeg_process.stdout.read(lowerCAmelCase_ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 89 | 1 |
'''simple docstring'''
def __lowerCamelCase ( lowerCAmelCase_ ) -> set:
_a : Dict = set()
    # edges = set of the graph's edges
    _a : Any = get_edges(lowerCAmelCase_ )
    # While there are still edges left, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices,
    # and then remove every edge adjacent to from_node or to_node.
while edges:
_a , _a : Optional[int] = edges.pop()
chosen_vertices.add(lowerCAmelCase_ )
chosen_vertices.add(lowerCAmelCase_ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowerCAmelCase_ )
return chosen_vertices
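# Collect every (from_node, to_node) pair of the adjacency mapping as a set of edges.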
def __lowerCamelCase ( lowerCAmelCase_ ) -> set:
_a : List[Any] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 89 |
'''simple docstring'''
__lowerCAmelCase = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : List[Any] = set()
# keep track of all the paths to be checked
_a : Any = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
_a : Tuple = queue.pop(0 )
# get the last node from the path
_a : Tuple = path[-1]
if node not in explored:
_a : Optional[Any] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
_a : Any = list(lowerCAmelCase_ )
new_path.append(lowerCAmelCase_ )
queue.append(lowerCAmelCase_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(lowerCAmelCase_ )
# in case there's no path between the 2 nodes
return []
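# Breadth-first search that returns only the distance (number of edges) from
# `start` to `target`, or -1 when no path exists.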
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
_a : Optional[int] = [start]
_a : Dict = set(lowerCAmelCase_ )
    # Keep track of distances from the `start` node.
_a : Dict = {start: 0, target: -1}
while queue:
_a : List[str] = queue.pop(0 )
if node == target:
_a : Any = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(lowerCAmelCase_ )
queue.append(lowerCAmelCase_ )
_a : Any = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 89 | 1 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
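# Precompute and pickle the per-example max token lengths for the train and val
# splits (used e.g. by length-based batch samplers, avoiding re-tokenization).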
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1024 , lowerCAmelCase_=1024 , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> List[Any]:
_a : str = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
_a : List[Any] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='train' , **lowerCAmelCase_ )
_a : List[str] = tok.pad_token_id
def get_lens(lowerCAmelCase_ ):
_a : Dict = tqdm(
DataLoader(lowerCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=lowerCAmelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_a : Union[str, Any] = []
for batch in dl:
_a : Optional[Any] = batch['input_ids'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
_a : Optional[Any] = batch['labels'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
max_lens.append(max(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
max_lens.extend(lowerCAmelCase_ )
return max_lens
_a : str = get_lens(lowerCAmelCase_ )
_a : Optional[int] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='val' , **lowerCAmelCase_ )
_a : Dict = get_lens(lowerCAmelCase_ )
pickle_save(lowerCAmelCase_ , train_ds.len_file )
pickle_save(lowerCAmelCase_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
# Initialise PyTorch model
_a : Optional[Any] = FunnelConfig.from_json_file(lowerCAmelCase_ )
print(f"""Building PyTorch model from configuration: {config}""" )
_a : Optional[Any] = FunnelBaseModel(lowerCAmelCase_ ) if base_model else FunnelModel(lowerCAmelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCAmelCase_ )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
__lowerCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 89 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = BarthezTokenizer
lowerCAmelCase : int = BarthezTokenizerFast
lowerCAmelCase : Dict = True
lowerCAmelCase : str = True
def __lowercase ( self : List[Any] ):
super().setUp()
_a : List[Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname ,legacy_format=_UpperCAmelCase )
_a : Union[str, Any] = tokenizer
def __lowercase ( self : Tuple ):
_a : Optional[Any] = '<pad>'
_a : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) ,_UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) ,_UpperCAmelCase )
def __lowercase ( self : str ):
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'<mask>' )
self.assertEqual(len(_UpperCAmelCase ) ,101122 )
def __lowercase ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size ,101122 )
@require_torch
def __lowercase ( self : Dict ):
_a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : Dict = [0, 57, 3018, 70307, 91, 2]
_a : Dict = self.tokenizer(
_UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual((2, 6) ,batch.input_ids.shape )
self.assertEqual((2, 6) ,batch.attention_mask.shape )
_a : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
_a : str = self.get_tokenizer()
_a : List[str] = self.get_rust_tokenizer()
_a : Dict = 'I was born in 92000, and this is falsé.'
_a : List[Any] = tokenizer.tokenize(_UpperCAmelCase )
_a : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Union[str, Any] = self.get_rust_tokenizer()
_a : Any = tokenizer.encode(_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
@slow
def __lowercase ( self : Optional[int] ):
# fmt: off
_a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
_a : Optional[Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
| 89 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
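# Divide-and-conquer maximum subarray: solve the left and right halves recursively
# and combine with the best subarray crossing the midpoint, giving O(n log n) time.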
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> tuple[int | None, int | None, float]:
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
_a : Any = (low + high) // 2
_a , _a , _a : Tuple = max_subarray(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_a , _a , _a : List[Any] = max_subarray(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ )
_a , _a , _a : Optional[int] = max_cross_sum(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> tuple[int, int, float]:
_a , _a : List[Any] = float('-inf' ), -1
_a , _a : List[str] = float('-inf' ), -1
_a : int | float = 0
for i in range(lowerCAmelCase_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
_a : int = summ
_a : List[str] = i
_a : List[str] = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
_a : List[Any] = summ
_a : Optional[Any] = i
return max_left, max_right, (left_sum + right_sum)
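# Benchmark helper: time max_subarray on a random array of the given input size.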
def __lowerCamelCase ( lowerCAmelCase_ ) -> float:
_a : List[Any] = [randint(1 , lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ )]
_a : str = time.time()
max_subarray(lowerCAmelCase_ , 0 , input_size - 1 )
_a : Optional[Any] = time.time()
return end - start
def __lowerCamelCase ( ) -> None:
_a : Tuple = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
_a : Union[str, Any] = [time_max_subarray(lowerCAmelCase_ ) for input_size in input_sizes]
print('No of Inputs\t\tTime Taken' )
for input_size, runtime in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
print(lowerCAmelCase_ , '\t\t' , lowerCAmelCase_ )
plt.plot(lowerCAmelCase_ , lowerCAmelCase_ )
plt.xlabel('Number of Inputs' )
plt.ylabel('Time taken in seconds' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 89 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( _UpperCamelCase ):
@require_torch
def __lowercase ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Optional[int] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : List[str] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Tuple = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : List[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : Any ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Dict = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : Optional[int] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : int = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : str = self.get_env()
_a : Optional[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_a : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_a : str = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_a : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Dict = self.get_env()
_a : int = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
_a : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : int ):
_a : Optional[Any] = '\nfrom transformers import pipeline\n '
_a : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_a : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_a : List[Any] = self.get_env()
_a : Dict = '1'
_a : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_a : str = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def __lowercase ( self : int ):
_a : Optional[int] = '\nfrom transformers import AutoModel\n '
_a : List[Any] = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_a : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Tuple = self.get_env()
_a : List[str] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : Optional[Any] = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
| 89 | 1 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
__lowerCAmelCase = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[int]:
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why doesn't a per-test-function cache dir work?
_a : List[str] = tmp_path_factory.getbasetemp() / 'cache'
_a : Any = test_hf_cache_home / 'datasets'
_a : str = test_hf_cache_home / 'metrics'
_a : Optional[Any] = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(lowerCAmelCase_ ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(lowerCAmelCase_ ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(lowerCAmelCase_ ) )
_a : Any = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(lowerCAmelCase_ ) )
_a : str = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(lowerCAmelCase_ ) )
@pytest.fixture(autouse=lowerCAmelCase_ , scope='session' )
def __lowerCamelCase ( ) -> Tuple:
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ ) -> Dict:
# don't take tests into account when counting downloads
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , lowerCAmelCase_ )
@pytest.fixture
def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[int]:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , lowerCAmelCase_ )
| 89 |
'''simple docstring'''
def __lowerCamelCase ( ) -> Tuple:
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def __lowerCamelCase ( lowerCAmelCase_ ) -> List[Any]:
_a : Any = 1
_a : Tuple = 2
while i * i <= n:
_a : Tuple = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
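# Project Euler problem 12: find the first triangle number with more than 500 divisors.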
def __lowerCamelCase ( ) -> str:
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
| 89 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCAmelCase = False
@skip_mps
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : List[Any] = StableDiffusionAttendAndExcitePipeline
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
lowerCAmelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __lowercase ( cls : Tuple ):
super().setUpClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
@classmethod
def __lowercase ( cls : List[str] ):
super().tearDownClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
def __lowercase ( self : str ):
torch.manual_seed(0 )
_a : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=1 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCAmelCase ,)
_a : Dict = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=_UpperCAmelCase ,set_alpha_to_one=_UpperCAmelCase ,)
torch.manual_seed(0 )
_a : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
_a : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='gelu' ,projection_dim=512 ,)
_a : List[Any] = CLIPTextModel(_UpperCAmelCase )
_a : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_a : int = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int]=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
_a : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
_a : Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_a : Union[str, Any] = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def __lowercase ( self : Union[str, Any] ):
_a : Any = 'cpu'
_a : Optional[Any] = self.get_dummy_components()
_a : Optional[Any] = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : Dict = self.get_dummy_inputs(_UpperCAmelCase )
_a : int = pipe(**_UpperCAmelCase ).images
_a : Dict = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 64, 64, 3) )
_a : int = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
_a : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase ,1E-3 )
def __lowercase ( self : Union[str, Any] ):
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __lowercase ( self : List[Any] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowercase ( self : Union[str, Any] ):
self._test_inference_batch_single_identical(batch_size=2 ,expected_max_diff=7E-4 )
def __lowercase ( self : Any ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowercase ( self : Optional[Any] ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __lowercase ( self : Optional[Any] ):
super().test_save_load_local(expected_max_difference=5E-4 )
def __lowercase ( self : List[str] ):
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class __magic_name__ ( unittest.TestCase ):
@classmethod
def __lowercase ( cls : str ):
super().setUpClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
@classmethod
def __lowercase ( cls : Dict ):
super().tearDownClass()
torch.use_deterministic_algorithms(_UpperCAmelCase )
def __lowercase ( self : Dict ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : List[Any] ):
_a : List[str] = torch.manual_seed(51 )
_a : str = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,safety_checker=_UpperCAmelCase ,torch_dtype=torch.floataa )
pipe.to('cuda' )
_a : List[Any] = 'a painting of an elephant with glasses'
_a : Any = [5, 7]
_a : List[Any] = pipe(
prompt=_UpperCAmelCase ,token_indices=_UpperCAmelCase ,guidance_scale=7.5 ,generator=_UpperCAmelCase ,num_inference_steps=5 ,max_iter_to_alter=5 ,output_type='numpy' ,).images[0]
_a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 89 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : Optional[int] ,_UpperCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,**_UpperCAmelCase : Dict ,):
super().__init__(features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Tuple = Sql(
cache_dir=_UpperCAmelCase ,features=_UpperCAmelCase ,sql=_UpperCAmelCase ,con=_UpperCAmelCase ,**_UpperCAmelCase ,)
def __lowercase ( self : Dict ):
_a : Optional[Any] = None
_a : Dict = None
_a : Dict = None
_a : Optional[int] = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase ,download_mode=_UpperCAmelCase ,verification_mode=_UpperCAmelCase ,base_path=_UpperCAmelCase ,)
# Build dataset for splits
_a : List[str] = self.builder.as_dataset(
split='train' ,verification_mode=_UpperCAmelCase ,in_memory=self.keep_in_memory )
return dataset
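# Writer side: stream a Dataset into a SQL table batch by batch, optionally in parallel.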
class __magic_name__ :
def __init__( self : Optional[int] ,_UpperCAmelCase : Dataset ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Dict ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_a : Dict = dataset
_a : List[Any] = name
_a : Tuple = con
_a : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_a : List[Any] = num_proc
_a : Tuple = to_sql_kwargs
def __lowercase ( self : List[Any] ):
_a : Tuple = self.to_sql_kwargs.pop('sql' ,_UpperCAmelCase )
_a : str = self.to_sql_kwargs.pop('con' ,_UpperCAmelCase )
_a : Optional[Any] = self.to_sql_kwargs.pop('index' ,_UpperCAmelCase )
_a : Any = self._write(index=_UpperCAmelCase ,**self.to_sql_kwargs )
return written
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Dict ):
_a , _a , _a : Any = args
_a : Tuple = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
_a : Dict = query_table(
table=self.dataset.data ,key=slice(_UpperCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
_a : Tuple = batch.to_pandas()
_a : Dict = df.to_sql(self.name ,self.con ,index=_UpperCAmelCase ,**_UpperCAmelCase )
return num_rows or len(_UpperCAmelCase )
def __lowercase ( self : int ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : List[Any] ):
_a : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_a , _a : List[Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,_UpperCAmelCase ,_UpperCAmelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += num_rows
return written
| 89 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Tuple = 'blip_2_vision_model'
def __init__( self : List[Any] ,_UpperCAmelCase : List[Any]=1408 ,_UpperCAmelCase : Optional[int]=6144 ,_UpperCAmelCase : List[str]=39 ,_UpperCAmelCase : Dict=16 ,_UpperCAmelCase : Optional[Any]=224 ,_UpperCAmelCase : Any=14 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[int]=0.0_00_01 ,_UpperCAmelCase : Tuple=0.0 ,_UpperCAmelCase : Union[str, Any]=1E-10 ,_UpperCAmelCase : Any=True ,**_UpperCAmelCase : Dict ,):
super().__init__(**_UpperCAmelCase )
_a : int = hidden_size
_a : List[str] = intermediate_size
_a : List[Any] = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Tuple = patch_size
_a : List[str] = image_size
_a : int = initializer_range
_a : Union[str, Any] = attention_dropout
_a : Union[str, Any] = layer_norm_eps
_a : List[str] = hidden_act
_a : Any = qkv_bias
@classmethod
def __lowercase ( cls : Tuple ,_UpperCAmelCase : Union[str, os.PathLike] ,**_UpperCAmelCase : Any ):
cls._set_token_in_kwargs(_UpperCAmelCase )
_a , _a : List[Any] = cls.get_config_dict(_UpperCAmelCase ,**_UpperCAmelCase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_a : Union[str, Any] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Tuple = 'blip_2_qformer'
def __init__( self : int ,_UpperCAmelCase : List[str]=30522 ,_UpperCAmelCase : Dict=768 ,_UpperCAmelCase : Tuple=12 ,_UpperCAmelCase : Any=12 ,_UpperCAmelCase : int=3072 ,_UpperCAmelCase : str="gelu" ,_UpperCAmelCase : Dict=0.1 ,_UpperCAmelCase : str=0.1 ,_UpperCAmelCase : int=512 ,_UpperCAmelCase : Union[str, Any]=0.02 ,_UpperCAmelCase : List[str]=1E-12 ,_UpperCAmelCase : int=0 ,_UpperCAmelCase : Optional[int]="absolute" ,_UpperCAmelCase : List[str]=2 ,_UpperCAmelCase : Any=1408 ,**_UpperCAmelCase : Optional[int] ,):
super().__init__(pad_token_id=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Any = vocab_size
_a : Optional[int] = hidden_size
_a : Dict = num_hidden_layers
_a : Dict = num_attention_heads
_a : str = hidden_act
_a : Any = intermediate_size
_a : Dict = hidden_dropout_prob
_a : str = attention_probs_dropout_prob
_a : Union[str, Any] = max_position_embeddings
_a : Tuple = initializer_range
_a : Optional[int] = layer_norm_eps
_a : int = position_embedding_type
_a : Optional[Any] = cross_attention_frequency
_a : List[str] = encoder_hidden_size
@classmethod
def __lowercase ( cls : Dict ,_UpperCAmelCase : Union[str, os.PathLike] ,**_UpperCAmelCase : Union[str, Any] ):
cls._set_token_in_kwargs(_UpperCAmelCase )
_a , _a : str = cls.get_config_dict(_UpperCAmelCase ,**_UpperCAmelCase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_a : Any = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Tuple = 'blip-2'
lowerCAmelCase : Any = True
def __init__( self : Any ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : Tuple=None ,_UpperCAmelCase : Dict=None ,_UpperCAmelCase : int=32 ,**_UpperCAmelCase : str ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
_a : str = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_a : List[str] = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_a : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_a : Optional[Any] = BlipaVisionConfig(**_UpperCAmelCase )
_a : Tuple = BlipaQFormerConfig(**_UpperCAmelCase )
_a : Optional[int] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_a : Any = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
_a : Dict = self.text_config.tie_word_embeddings
_a : Union[str, Any] = self.text_config.is_encoder_decoder
_a : int = num_query_tokens
_a : str = self.vision_config.hidden_size
_a : Optional[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_a : Optional[int] = 1.0
_a : str = 0.02
@classmethod
def __lowercase ( cls : str ,_UpperCAmelCase : BlipaVisionConfig ,_UpperCAmelCase : BlipaQFormerConfig ,_UpperCAmelCase : PretrainedConfig ,**_UpperCAmelCase : List[Any] ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**_UpperCAmelCase ,)
def __lowercase ( self : int ):
_a : Dict = copy.deepcopy(self.__dict__ )
_a : Any = self.vision_config.to_dict()
_a : Optional[int] = self.qformer_config.to_dict()
_a : Dict = self.text_config.to_dict()
_a : Optional[Any] = self.__class__.model_type
return output
| 89 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> np.ndarray:
_a : Union[str, Any] = cva.getAffineTransform(lowerCAmelCase_ , lowerCAmelCase_ )
return cva.warpAffine(lowerCAmelCase_ , lowerCAmelCase_ , (rows, cols) )
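# Demo below: warp the grayscale Lena test image with several affine transforms and plot them.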
if __name__ == "__main__":
# read original image
__lowerCAmelCase = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
    # convert the image to grayscale
__lowerCAmelCase = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__lowerCAmelCase , __lowerCAmelCase = gray_img.shape
# set different points to rotate image
__lowerCAmelCase = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
__lowerCAmelCase = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
__lowerCAmelCase = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
__lowerCAmelCase = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
__lowerCAmelCase = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__lowerCAmelCase = plt.figure(1)
__lowerCAmelCase = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 89 | 1 |
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 1 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : Optional[int] ,_UpperCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,**_UpperCAmelCase : Dict ,):
super().__init__(features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Tuple = Sql(
cache_dir=_UpperCAmelCase ,features=_UpperCAmelCase ,sql=_UpperCAmelCase ,con=_UpperCAmelCase ,**_UpperCAmelCase ,)
def __lowercase ( self : Dict ):
_a : Optional[Any] = None
_a : Dict = None
_a : Dict = None
_a : Optional[int] = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase ,download_mode=_UpperCAmelCase ,verification_mode=_UpperCAmelCase ,base_path=_UpperCAmelCase ,)
# Build dataset for splits
_a : List[str] = self.builder.as_dataset(
split='train' ,verification_mode=_UpperCAmelCase ,in_memory=self.keep_in_memory )
return dataset
class __magic_name__ :
def __init__( self : Optional[int] ,_UpperCAmelCase : Dataset ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Dict ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_a : Dict = dataset
_a : List[Any] = name
_a : Tuple = con
_a : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_a : List[Any] = num_proc
_a : Tuple = to_sql_kwargs
def __lowercase ( self : List[Any] ):
_a : Tuple = self.to_sql_kwargs.pop('sql' ,_UpperCAmelCase )
_a : str = self.to_sql_kwargs.pop('con' ,_UpperCAmelCase )
_a : Optional[Any] = self.to_sql_kwargs.pop('index' ,_UpperCAmelCase )
_a : Any = self._write(index=_UpperCAmelCase ,**self.to_sql_kwargs )
return written
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Dict ):
_a , _a , _a : Any = args
_a : Tuple = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
_a : Dict = query_table(
table=self.dataset.data ,key=slice(_UpperCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
_a : Tuple = batch.to_pandas()
_a : Dict = df.to_sql(self.name ,self.con ,index=_UpperCAmelCase ,**_UpperCAmelCase )
return num_rows or len(_UpperCAmelCase )
def __lowercase ( self : int ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : List[Any] ):
_a : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_a , _a : List[Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,_UpperCAmelCase ,_UpperCAmelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += num_rows
return written
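# A minimal usage sketch (illustrative only: the reader/writer classes above are
# renamed in this dump, so the upstream `datasets` names are assumed below):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col": [1, 2, 3]})
#   SqlDatasetWriter(ds, "my_table", "sqlite:///my.db", batch_size=2).write()
#   loaded = SqlDatasetReader("SELECT * FROM my_table", "sqlite:///my.db").read()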
| 89 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1024 , lowerCAmelCase_=1024 , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> List[Any]:
_a : str = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
_a : List[Any] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='train' , **lowerCAmelCase_ )
_a : List[str] = tok.pad_token_id
def get_lens(lowerCAmelCase_ ):
_a : Dict = tqdm(
DataLoader(lowerCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=lowerCAmelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_a : Union[str, Any] = []
for batch in dl:
_a : Optional[Any] = batch['input_ids'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
_a : Optional[Any] = batch['labels'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
max_lens.append(max(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
max_lens.extend(lowerCAmelCase_ )
return max_lens
_a : str = get_lens(lowerCAmelCase_ )
_a : Optional[int] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='val' , **lowerCAmelCase_ )
_a : Dict = get_lens(lowerCAmelCase_ )
pickle_save(lowerCAmelCase_ , train_ds.len_file )
pickle_save(lowerCAmelCase_ , val_ds.len_file )
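# Illustrative CLI invocation (the script name and flags below are assumptions
# based on the function's parameters; fire.Fire maps command-line arguments onto them):
#
#   python save_len_file.py <tokenizer_name> <data_dir> --consider_target=False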
if __name__ == "__main__":
fire.Fire(save_len_file)
| 89 | 1 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this with taking a full-size model and reducing its layers and
# embedding dimensions to the minimum while keeping the full vocab + merges files, which leads
# to ~3MB in total for all files. That latter approach is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__lowerCAmelCase = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__lowerCAmelCase = dict(zip(vocab, range(len(vocab))))
__lowerCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = Path(tmpdirname)
__lowerCAmelCase = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__lowerCAmelCase = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__lowerCAmelCase = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__lowerCAmelCase = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__lowerCAmelCase = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__lowerCAmelCase = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
__lowerCAmelCase = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__lowerCAmelCase = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 89 |
'''simple docstring'''
from typing import Any
class __magic_name__ :
def __init__( self : List[Any] ,_UpperCAmelCase : Any ):
_a : List[Any] = data
_a : Union[str, Any] = None
def __repr__( self : Any ):
return F"""Node({self.data})"""
class __magic_name__ :
def __init__( self : int ):
_a : Tuple = None
def __iter__( self : str ):
_a : int = self.head
while node:
yield node.data
_a : Union[str, Any] = node.next
def __len__( self : Optional[Any] ):
return sum(1 for _ in self )
def __repr__( self : str ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __getitem__( self : Tuple ,_UpperCAmelCase : int ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Union[str, Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
_a : Any = self.head
for _ in range(_UpperCAmelCase ):
_a : Optional[Any] = current.next
_a : Optional[int] = data
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Any ):
self.insert_nth(len(self ) ,_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : Any ):
self.insert_nth(0 ,_UpperCAmelCase )
def __lowercase ( self : str ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
_a : int = Node(_UpperCAmelCase )
if self.head is None:
_a : str = new_node
elif index == 0:
_a : List[str] = self.head # link new_node to head
_a : Union[str, Any] = new_node
else:
_a : int = self.head
for _ in range(index - 1 ):
_a : Union[str, Any] = temp.next
_a : List[str] = temp.next
_a : Optional[int] = new_node
def __lowercase ( self : Optional[int] ): # print every node data
print(self )
def __lowercase ( self : str ):
return self.delete_nth(0 )
def __lowercase ( self : str ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def __lowercase ( self : List[str] ,_UpperCAmelCase : int = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
_a : Optional[Any] = self.head # default first node
if index == 0:
_a : int = self.head.next
else:
_a : int = self.head
for _ in range(index - 1 ):
_a : str = temp.next
_a : str = temp.next
_a : int = temp.next.next
return delete_node.data
def __lowercase ( self : List[Any] ):
return self.head is None
def __lowercase ( self : Tuple ):
_a : List[Any] = None
_a : Tuple = self.head
while current:
# Store the current node's next node.
_a : Dict = current.next
# Make the current node's next point backwards
_a : str = prev
# Make the previous node be the current node
_a : Tuple = current
# Make the current node the next node (to progress iteration)
_a : Optional[Any] = next_node
# Return prev in order to put the head at the end
_a : int = prev
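# Worked example for reverse() above (illustrative): on the list 1->2->3 the loop
# walks prev=None,current=1 -> prev=1,current=2 -> prev=2,current=3, after which
# the head is set to the node holding 3 and the list reads 3->2->1.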
def __lowerCamelCase ( ) -> None:
_a : List[str] = LinkedList()
assert linked_list.is_empty() is True
assert str(lowerCAmelCase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(lowerCAmelCase_ ) == i
linked_list.insert_nth(lowerCAmelCase_ , i + 1 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(lowerCAmelCase_ ) == 9
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
_a : Union[str, Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(-8 , 1 ) )
def __lowerCamelCase ( ) -> None:
_a : Dict = [
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
-192.55_555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
_a : List[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(lowerCAmelCase_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(lowerCAmelCase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
_a : List[str] = linked_list.delete_head()
assert result == -9
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
_a : Dict = linked_list.delete_tail()
assert result == 12.2
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
_a : Optional[Any] = linked_list.delete_nth(10 )
assert result is None
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(lowerCAmelCase_ )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(lowerCAmelCase_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __lowerCamelCase ( ) -> Union[str, Any]:
from doctest import testmod
testmod()
_a : Optional[int] = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(lowerCAmelCase_ )
print('\nReading/changing Node data using indexing:' )
print(f"""Element at Position 1: {linked_list[1]}""" )
_a : Optional[Any] = input('Enter New Value: ' ).strip()
print('New list:' )
print(lowerCAmelCase_ )
print(f"""length of linked_list is : {len(lowerCAmelCase_ )}""" )
if __name__ == "__main__":
main()
| 89 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Optional[int] = ['image_processor', 'tokenizer']
lowerCAmelCase : List[str] = 'CLIPImageProcessor'
lowerCAmelCase : Union[str, Any] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self : Dict ,_UpperCAmelCase : Tuple=None ,_UpperCAmelCase : int=None ,**_UpperCAmelCase : str ):
_a : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' ,_UpperCAmelCase ,)
_a : Dict = kwargs.pop('feature_extractor' )
_a : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase ,_UpperCAmelCase )
def __call__( self : Union[str, Any] ,_UpperCAmelCase : Dict=None ,_UpperCAmelCase : List[str]=None ,_UpperCAmelCase : Optional[Any]=None ,**_UpperCAmelCase : Optional[Any] ):
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_a : int = self.tokenizer(_UpperCAmelCase ,return_tensors=_UpperCAmelCase ,**_UpperCAmelCase )
if images is not None:
_a : Dict = self.image_processor(_UpperCAmelCase ,return_tensors=_UpperCAmelCase ,**_UpperCAmelCase )
if text is not None and images is not None:
_a : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) ,tensor_type=_UpperCAmelCase )
def __lowercase ( self : int ,*_UpperCAmelCase : List[Any] ,**_UpperCAmelCase : Dict ):
return self.tokenizer.batch_decode(*_UpperCAmelCase ,**_UpperCAmelCase )
def __lowercase ( self : Optional[int] ,*_UpperCAmelCase : Any ,**_UpperCAmelCase : Dict ):
return self.tokenizer.decode(*_UpperCAmelCase ,**_UpperCAmelCase )
@property
def __lowercase ( self : Any ):
_a : Tuple = self.tokenizer.model_input_names
_a : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
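# A minimal usage sketch (illustrative; this processor wraps a CLIP image
# processor and an XLM-Roberta tokenizer, so an AltCLIP-style class and
# checkpoint name are assumed below):
#
#   from PIL import Image
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"),
#                      return_tensors="pt")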
| 89 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : str ,_UpperCAmelCase : Path ,_UpperCAmelCase : Union[str, None] = None ,_UpperCAmelCase : Union[List[str], None] = None ,_UpperCAmelCase : Union[str, List[str], None] = None ,_UpperCAmelCase : bool = True ,):
_a : Dict = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )]
if identifier is not None:
_a : str = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
for n_ in n_identifier:
_a : int = [file for file in files if n_ not in file]
else:
_a : Optional[Any] = [file for file in files if n_identifier not in file]
_a : Dict = ignore_files or []
ignore_files.append('__init__.py' )
_a : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' ,_UpperCAmelCase )
if only_modules:
_a : Any = file.split('.' )[0]
try:
_a : Optional[int] = getattr(_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict = doctest.DocTestSuite(_UpperCAmelCase )
_a : Optional[int] = unittest.TextTestRunner().run(_UpperCAmelCase )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
_a : str = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def __lowercase ( self : Union[str, Any] ):
_a : Optional[Any] = Path('src/transformers' )
_a : Optional[Any] = 'modeling'
_a : Union[str, Any] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase ,ignore_files=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : str = Path('src/transformers' )
_a : List[str] = 'tokenization'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Any = Path('src/transformers' )
_a : str = 'configuration'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : Tuple = Path('src/transformers' )
_a : Optional[int] = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(_UpperCAmelCase ,n_identifier=_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
_a : Union[str, Any] = Path('docs/source' )
_a : List[str] = ['favicon.ico']
self.analyze_directory(_UpperCAmelCase ,ignore_files=_UpperCAmelCase ,only_modules=_UpperCAmelCase )
| 89 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__lowerCAmelCase = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
__lowerCAmelCase = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
__lowerCAmelCase = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each prediction is a label id (or a float score for the STS-B regression task).
    references: list of references, one per prediction.
        Each reference is a label id (or a float score for STS-B).
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
return float((preds == labels).mean() )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_a : Optional[Any] = simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )
_a : Union[str, Any] = float(fa_score(y_true=lowerCAmelCase_ , y_pred=lowerCAmelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_a : Tuple = float(pearsonr(lowerCAmelCase_ , lowerCAmelCase_ )[0] )
_a : str = float(spearmanr(lowerCAmelCase_ , lowerCAmelCase_ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
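# Worked example (illustrative): for preds = [0., 1., 2.] and refs = [0., 2., 4.]
# both scores are 1.0, since the pairs are perfectly linearly (hence also
# monotonically) related.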
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def __lowercase ( self : str ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' ,)
def __lowercase ( self : int ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : List[Any] ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_UpperCAmelCase ,_UpperCAmelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(_UpperCAmelCase ,_UpperCAmelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_UpperCAmelCase ,_UpperCAmelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_UpperCAmelCase ,_UpperCAmelCase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 89 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
__lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[Any]:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_a : List[Any] = model_type_to_module_name(lowerCAmelCase_ )
_a : Optional[Any] = importlib.import_module(f""".{module_name}""" , 'transformers.models' )
try:
return getattr(lowerCAmelCase_ , lowerCAmelCase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowerCAmelCase_ , '__name__' , lowerCAmelCase_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_a : Dict = importlib.import_module('transformers' )
if hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
return getattr(lowerCAmelCase_ , lowerCAmelCase_ )
return None
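# Example (illustrative): resolving the upstream name "CLIPImageProcessor" walks
# IMAGE_PROCESSOR_MAPPING_NAMES, imports transformers.models.clip, and returns
# the class; a name that matches nothing anywhere ends up returning None.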
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , **lowerCAmelCase_ , ) -> Tuple:
_a : List[str] = get_file_from_repo(
lowerCAmelCase_ , lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(lowerCAmelCase_ , encoding='utf-8' ) as reader:
return json.load(lowerCAmelCase_ )
class __magic_name__ :
def __init__( self : List[str] ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(_UpperCAmelCase )
def __lowercase ( cls : Dict ,_UpperCAmelCase : Union[str, Any] ,**_UpperCAmelCase : Optional[Any] ):
_a : Any = kwargs.pop('config' ,_UpperCAmelCase )
_a : Dict = kwargs.pop('trust_remote_code' ,_UpperCAmelCase )
_a : Any = True
_a , _a : Tuple = ImageProcessingMixin.get_image_processor_dict(_UpperCAmelCase ,**_UpperCAmelCase )
_a : List[Any] = config_dict.get('image_processor_type' ,_UpperCAmelCase )
_a : int = None
if "AutoImageProcessor" in config_dict.get('auto_map' ,{} ):
_a : Any = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_a : List[Any] = config_dict.pop('feature_extractor_type' ,_UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
_a : Optional[int] = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ):
_a : List[Any] = config_dict['auto_map']['AutoFeatureExtractor']
_a : List[str] = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Dict = AutoConfig.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
            # It could be in `config.image_processor_type`
_a : Optional[int] = getattr(_UpperCAmelCase ,'image_processor_type' ,_UpperCAmelCase )
if hasattr(_UpperCAmelCase ,'auto_map' ) and "AutoImageProcessor" in config.auto_map:
_a : Union[str, Any] = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
_a : Optional[int] = image_processor_class_from_name(_UpperCAmelCase )
_a : List[str] = image_processor_auto_map is not None
_a : Optional[int] = image_processor_class is not None or type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
_a : Optional[int] = resolve_trust_remote_code(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
if has_remote_code and trust_remote_code:
_a : Dict = get_class_from_dynamic_module(
_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase )
_a : int = kwargs.pop('code_revision' ,_UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
_a : Dict = IMAGE_PROCESSOR_MAPPING[type(_UpperCAmelCase )]
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def __lowercase ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Dict ):
IMAGE_PROCESSOR_MAPPING.register(_UpperCAmelCase ,_UpperCAmelCase )
| 89 | 1 |
'''simple docstring'''
from maths.prime_check import is_prime
def __lowerCamelCase ( lowerCAmelCase_ ) -> int:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_a : List[str] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCAmelCase_ )
if is_prime(lowerCAmelCase_ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
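# Worked examples (illustrative): 5 -> 7 (5 and 7 are twin primes), 4 -> -1
# (4 is not prime), and 7 -> -1 (9 = 7 + 2 is not prime).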
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowerCAmelCase = None
__lowerCAmelCase = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowerCAmelCase = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class __magic_name__ :
lowerCAmelCase : bool = True
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "PIL.Image.Image"
lowerCAmelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase : str = field(default='Image' , init=_UpperCamelCase , repr=_UpperCamelCase )
def __call__( self : Union[str, Any] ):
return self.pa_type
def __lowercase ( self : Any ,_UpperCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Optional[Any] = np.array(_UpperCAmelCase )
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(_UpperCAmelCase ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_UpperCAmelCase )
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : dict ,_UpperCAmelCase : Optional[int]=None ):
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.' )
if token_per_repo_id is None:
_a : Dict = {}
_a , _a : str = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(_UpperCAmelCase ):
_a : Any = PIL.Image.open(_UpperCAmelCase )
else:
_a : List[Any] = path.split('::' )[-1]
try:
_a : str = string_to_dict(_UpperCAmelCase ,config.HUB_DATASETS_URL )['repo_id']
_a : Optional[Any] = token_per_repo_id.get(_UpperCAmelCase )
except ValueError:
_a : int = None
with xopen(_UpperCAmelCase ,'rb' ,use_auth_token=_UpperCAmelCase ) as f:
_a : Tuple = BytesIO(f.read() )
_a : Union[str, Any] = PIL.Image.open(bytes_ )
else:
_a : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __lowercase ( self : int ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def __lowercase ( self : str ,_UpperCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
_a : Union[str, Any] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
_a : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_a : List[str] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Any = pa.StructArray.from_arrays([storage, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
_a : Union[str, Any] = storage.field('bytes' )
else:
_a : Tuple = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
_a : Union[str, Any] = storage.field('path' )
else:
_a : Dict = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
_a : List[str] = pa.array(
[encode_np_array(np.array(_UpperCAmelCase ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
_a : int = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Optional[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase ,self.pa_type )
def __lowercase ( self : Dict ,_UpperCAmelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(_UpperCAmelCase : Tuple ):
with xopen(_UpperCAmelCase ,'rb' ) as f:
_a : int = f.read()
return bytes_
_a : Any = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
_a : Optional[Any] = pa.array(
[os.path.basename(_UpperCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] ,type=pa.string() ,)
_a : Dict = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase ,self.pa_type )
def __lowerCamelCase ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_a : Dict = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __lowerCamelCase ( lowerCAmelCase_ ) -> bytes:
_a : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
_a : Optional[Any] = image.format
else:
_a : str = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(lowerCAmelCase_ , format=lowerCAmelCase_ )
return buffer.getvalue()
def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
if hasattr(lowerCAmelCase_ , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
_a : List[Any] = array.dtype
_a : Optional[int] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
_a : Union[str, Any] = dtype.kind
_a : Union[str, Any] = dtype.itemsize
_a : List[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
_a : Optional[int] = np.dtype('|u1' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
_a : Union[str, Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
_a : str = dtype_byteorder + dtype_kind + str(lowerCAmelCase_ )
_a : List[Any] = np.dtype(lowerCAmelCase_ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
_a : Union[str, Any] = PIL.Image.fromarray(array.astype(lowerCAmelCase_ ) )
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def __lowerCamelCase ( lowerCAmelCase_ ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if objs:
_a , _a : Optional[Any] = first_non_null_value(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowerCAmelCase_ , np.ndarray ):
_a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
elif isinstance(lowerCAmelCase_ , PIL.Image.Image ):
_a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
else:
return objs
else:
return objs
| 89 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __lowerCamelCase ( ) -> Any:
_a : List[Any] = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=lowerCAmelCase_ )
_a : Dict = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=lowerCAmelCase_ )
env_command_parser(subparsers=lowerCAmelCase_ )
launch_command_parser(subparsers=lowerCAmelCase_ )
tpu_command_parser(subparsers=lowerCAmelCase_ )
test_command_parser(subparsers=lowerCAmelCase_ )
# Let's go
_a : Optional[Any] = parser.parse_args()
if not hasattr(lowerCAmelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCAmelCase_ )
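# Example invocations routed by this dispatcher (illustrative; `train.py` is a
# placeholder script name):
#
#   accelerate config
#   accelerate env
#   accelerate launch train.py --num_processes 2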
if __name__ == "__main__":
main()
| 89 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str | Literal[False]:
_a : Optional[int] = list(lowerCAmelCase_ )
_a : Optional[Any] = list(lowerCAmelCase_ )
_a : Union[str, Any] = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count += 1
_a : Optional[int] = '_'
if count > 1:
return False
else:
return "".join(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ ) -> list[str]:
_a : Optional[int] = []
while True:
_a : Any = ['$'] * len(lowerCAmelCase_ )
_a : List[str] = []
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
_a : Optional[int] = compare_string(binary[i] , binary[j] )
if k is False:
_a : Optional[Any] = '*'
_a : Optional[Any] = '*'
temp.append('X' )
for i in range(len(lowerCAmelCase_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(lowerCAmelCase_ ) == 0:
return pi
_a : Any = list(set(lowerCAmelCase_ ) )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : int = []
for minterm in minterms:
_a : Optional[int] = ''
for _ in range(lowerCAmelCase_ ):
_a : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(lowerCAmelCase_ )
return temp
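# Worked example (illustrative): with 3 variables, minterm 5 is converted to
# '101' -- the loop prepends 5 % 2, then 2 % 2, then 1 % 2.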
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> bool:
_a : int = list(lowerCAmelCase_ )
_a : Union[str, Any] = list(lowerCAmelCase_ )
_a : str = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
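# Worked example (illustrative): with count=1, the pair ('01_1', '0101') matches
# because the strings differ only at the '_' position; with count=0 the same
# pair would not.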
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : List[Any] = []
_a : Optional[Any] = [0] * len(lowerCAmelCase_ )
for i in range(len(chart[0] ) ):
_a : Union[str, Any] = 0
_a : int = -1
for j in range(len(lowerCAmelCase_ ) ):
if chart[j][i] == 1:
count += 1
_a : int = j
if count == 1:
_a : List[Any] = 1
for i in range(len(lowerCAmelCase_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(lowerCAmelCase_ ) ):
_a : Any = 0
temp.append(prime_implicants[i] )
while True:
_a : Union[str, Any] = 0
_a : List[Any] = -1
_a : str = 0
for i in range(len(lowerCAmelCase_ ) ):
_a : Union[str, Any] = chart[i].count(1 )
if count_n > max_n:
_a : Any = count_n
_a : int = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(lowerCAmelCase_ ) ):
_a : List[str] = 0
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[list[int]]:
_a : int = [[0 for x in range(len(lowerCAmelCase_ ) )] for x in range(len(lowerCAmelCase_ ) )]
for i in range(len(lowerCAmelCase_ ) ):
_a : str = prime_implicants[i].count('_' )
for j in range(len(lowerCAmelCase_ ) ):
if is_for_table(prime_implicants[i] , binary[j] , lowerCAmelCase_ ):
_a : Optional[Any] = 1
return chart
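# Worked example (illustrative): for prime implicants ['1_1'] and minterms
# ['101', '111'], each minterm differs from '1_1' only at the '_' position, so
# the chart comes out as [[1, 1]].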
def __lowerCamelCase ( ) -> None:
_a : Optional[int] = int(input('Enter the no. of variables\n' ) )
_a : List[Any] = [
        int(x )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_a : List[str] = decimal_to_binary(lowerCAmelCase_ , lowerCAmelCase_ )
_a : Dict = check(lowerCAmelCase_ )
print('Prime Implicants are:' )
print(lowerCAmelCase_ )
_a : List[Any] = prime_implicant_chart(lowerCAmelCase_ , lowerCAmelCase_ )
_a : int = selection(lowerCAmelCase_ , lowerCAmelCase_ )
print('Essential Prime Implicants are:' )
print(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 89 | 1 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : int = StableDiffusionControlNetImgaImgPipeline
lowerCAmelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCAmelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
lowerCAmelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self : Any ):
torch.manual_seed(0 )
_a : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
_a : Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
torch.manual_seed(0 )
_a : List[str] = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=_UpperCAmelCase ,set_alpha_to_one=_UpperCAmelCase ,)
torch.manual_seed(0 )
_a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
_a : List[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_a : str = CLIPTextModel(_UpperCAmelCase )
_a : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_a : Tuple = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase ( self : int ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
_a : Optional[Any] = torch.manual_seed(_UpperCAmelCase )
else:
_a : Dict = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_a : int = 2
_a : str = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_UpperCAmelCase ,device=torch.device(_UpperCAmelCase ) ,)
_a : int = floats_tensor(control_image.shape ,rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
_a : int = image.cpu().permute(0 ,2 ,3 ,1 )[0]
_a : List[Any] = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('RGB' ).resize((64, 64) )
_a : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def __lowercase ( self : Tuple ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def __lowercase ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowercase ( self : Dict ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = StableDiffusionControlNetImgaImgPipeline
lowerCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCAmelCase : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase : List[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowercase ( self : Tuple ):
torch.manual_seed(0 )
_a : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
def init_weights(_UpperCAmelCase : int ):
if isinstance(_UpperCAmelCase ,torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
_a : Union[str, Any] = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
controlneta.controlnet_down_blocks.apply(_UpperCAmelCase )
torch.manual_seed(0 )
_a : Union[str, Any] = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
controlneta.controlnet_down_blocks.apply(_UpperCAmelCase )
torch.manual_seed(0 )
_a : Optional[int] = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=_UpperCAmelCase ,set_alpha_to_one=_UpperCAmelCase ,)
torch.manual_seed(0 )
_a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
_a : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_a : Optional[int] = CLIPTextModel(_UpperCAmelCase )
_a : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_a : str = MultiControlNetModel([controlneta, controlneta] )
_a : Tuple = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase ( self : str ,_UpperCAmelCase : int ,_UpperCAmelCase : int=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
_a : List[str] = torch.manual_seed(_UpperCAmelCase )
else:
_a : Dict = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_a : str = 2
_a : Union[str, Any] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_UpperCAmelCase ,device=torch.device(_UpperCAmelCase ) ,),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_UpperCAmelCase ,device=torch.device(_UpperCAmelCase ) ,),
]
_a : str = floats_tensor(control_image[0].shape ,rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
_a : int = image.cpu().permute(0 ,2 ,3 ,1 )[0]
_a : Optional[Any] = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('RGB' ).resize((64, 64) )
_a : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def __lowercase ( self : Optional[int] ):
_a : Dict = self.get_dummy_components()
_a : List[str] = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
_a : Any = 10.0
_a : Optional[Any] = 4
_a : Tuple = self.get_dummy_inputs(_UpperCAmelCase )
_a : Optional[Any] = steps
_a : Optional[int] = scale
        output_a = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_b = pipe(**inputs ,control_guidance_start=0.1 ,control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_c = pipe(**inputs ,control_guidance_start=[0.1, 0.3] ,control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_d = pipe(**inputs ,control_guidance_start=0.4 ,control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_b ) ) > 1E-3
        assert np.sum(np.abs(output_a - output_c ) ) > 1E-3
        assert np.sum(np.abs(output_a - output_d ) ) > 1E-3
def __lowercase ( self : Optional[Any] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def __lowercase ( self : List[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowercase ( self : Optional[Any] ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowercase ( self : List[str] ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
    def tearDown( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Union[str, Any] ):
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' ,safety_checker=None ,controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
        output = pipe(
            prompt ,image ,control_image=control_image ,generator=generator ,output_type='np' ,num_inference_steps=50 ,strength=0.6 ,)
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9E-2
| 89 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_cpmant'''] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester :
    def __init__( self ,parent ,batch_size=13 ,image_size=10 ,num_channels=3 ,patch_size=2 ,tubelet_size=2 ,num_frames=2 ,is_training=True ,use_labels=True ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,type_sequence_label_size=10 ,initializer_range=0.02 ,mask_ratio=0.9 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )
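        # e.g. with the defaults above: (10 // 2) ** 2 = 25 patches per frame,
        # (2 // 2) * 25 = 25 tokens in total, and int(0.9 * 25) = 22 masked tokens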
    def prepare_config_and_inputs( self : List[Any] ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : Union[str, Any] ):
return VideoMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,tubelet_size=self.tubelet_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ):
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self ,config ,pixel_values ,labels ):
        model = VideoMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.batch_size ,-1 ).bool()
        result = model(pixel_values ,bool_masked_pos )
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
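        # the decoder regresses raw pixels: with the defaults above this is
        # 3 channels * 2 frames * 2 ** 2 pixels = 24 values per masked tubelet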
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_masked_patches, decoder_num_labels) )
    def prepare_config_and_inputs_for_common( self : Union[str, Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Dict = False
def __lowercase ( self : Dict ):
        self.model_tester = VideoMAEModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=VideoMAEConfig ,has_text_modality=False ,hidden_size=37 )
    def _prepare_for_class( self ,inputs_dict ,model_class ,return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,) )
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            bool_masked_pos = mask.expand(self.model_tester.batch_size ,-1 ).bool()
            inputs_dict['bool_masked_pos'] = bool_masked_pos.to(torch_device )
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
def __lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def __lowercase ( self : List[str] ):
pass
def __lowercase ( self : List[str] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear ) )
def __lowercase ( self : str ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
def __lowercase ( self : int ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __lowercase ( self : Any ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
@slow
def __lowercase ( self : Optional[int] ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __lowercase ( self : Union[str, Any] ):
        if not self.has_attentions:
            pass
        else:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) ,self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) ,self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
                out_len = len(outputs )
                # Check attention is always last and order is fine
                inputs_dict['output_attentions'] = True
                inputs_dict['output_hidden_states'] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
                self.assertEqual(out_len + 1 ,len(outputs ) )
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions ) ,self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
def __lowercase ( self : List[Any] ):
        def check_hidden_states_output(inputs_dict ,config ,model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ) ,expected_num_layers )
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : Optional[int] ):
pass
def prepare_video() -> List[Any]:
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self : List[Any] ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __lowercase ( self : int ):
        model = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video ,return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4 ) )
@slow
def __lowercase ( self : Dict ):
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video ,return_tensors='pt' ).to(torch_device )
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' ,filename='bool_masked_pos.pt' )
        inputs['bool_masked_pos'] = torch.load(local_path )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536] )
        expected_slice = torch.tensor(
            [[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ,device=torch_device )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice ,atol=1E-4 ) )
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.51_42] ,device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss ,expected_loss ,atol=1E-4 ) )
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ,norm_pix_loss=False ).to(
            torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_loss = torch.tensor([0.64_69] ,device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss ,expected_loss ,atol=1E-4 ) )
| 89 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
lowerCAmelCase : List[Any] = True
lowerCAmelCase : int = True
def __lowercase ( self : Dict ):
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase ( self : Dict ,**_UpperCAmelCase : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Tuple ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
return input_text, output_text
def __lowercase ( self : Optional[int] ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[7, 4, 5, 10, 8, 9] )
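        # the expected ids are the row indices in vocab_tokens above:
        # 'un' -> 7, '##want' -> 4, '##ed' -> 5, ',' -> 10, 'runn' -> 8, '##ing' -> 9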
def __lowercase ( self : Optional[int] ):
pass
| 89 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , 'Tatoeba directory does not exist.' )
class __magic_name__ ( unittest.TestCase ):
@cached_property
    def resolver( self : str ):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
def __lowercase ( self : List[Any] ):
self.resolver.convert_models(['heb-eng'] )
@slow
def __lowercase ( self : Optional[int] ):
        content , mmeta = self.resolver.write_model_card('opus-mt-he-en' ,dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 89 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class __magic_name__ ( PretrainedConfig ):
    model_type = 'conditional_detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self ,use_timm_backbone=True ,backbone_config=None ,num_channels=3 ,num_queries=300 ,encoder_layers=6 ,encoder_ffn_dim=2048 ,encoder_attention_heads=8 ,decoder_layers=6 ,decoder_ffn_dim=2048 ,decoder_attention_heads=8 ,encoder_layerdrop=0.0 ,decoder_layerdrop=0.0 ,is_encoder_decoder=True ,activation_function="relu" ,d_model=256 ,dropout=0.1 ,attention_dropout=0.0 ,activation_dropout=0.0 ,init_std=0.02 ,init_xavier_std=1.0 ,auxiliary_loss=False ,position_embedding_type="sine" ,backbone="resnet50" ,use_pretrained_backbone=True ,dilation=False ,class_cost=2 ,bbox_cost=5 ,giou_cost=2 ,mask_loss_coefficient=1 ,dice_loss_coefficient=1 ,cls_loss_coefficient=2 ,bbox_loss_coefficient=5 ,giou_loss_coefficient=2 ,focal_alpha=0.25 ,**kwargs ,):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config ,dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder ,**kwargs )
@property
    def num_attention_heads( self : Dict ):
return self.encoder_attention_heads
@property
    def hidden_size( self : str ):
return self.d_model
    def to_dict( self : int ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class __magic_name__ ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs( self : Dict ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
    def atol_for_validation( self : Any ):
return 1E-5
@property
    def default_onnx_opset( self : List[Any] ):
return 12
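    # The three properties above define the ONNX export contract: inputs named 'pixel_values'
    # (batch, num_channels, height, width) and 'pixel_mask' (batch), a validation tolerance of
    # 1e-5, and a default ONNX opset of 12.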
| 89 | 1 |
'''simple docstring'''
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph , start , goal ) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
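# Note: this variant stores whole paths on the queue, so its memory use grows with the number
# of partial paths explored; the distance-only variant below keeps only O(V) bookkeeping state.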
def bfs_shortest_path_distance(graph , start , target ) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
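# This breadth-first search runs in O(V + E) time: each node is enqueued at most once and each
# adjacency list is scanned at most once.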
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 89 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
    def __init__( self ,parent ,batch_size=13 ,image_size=32 ,num_channels=3 ,num_stages=4 ,hidden_sizes=[10, 20, 30, 40] ,depths=[2, 2, 3, 2] ,is_training=True ,use_labels=True ,intermediate_size=37 ,hidden_act="gelu" ,num_labels=10 ,initializer_range=0.02 ,out_features=["stage2", "stage3", "stage4"] ,out_indices=[2, 3, 4] ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self : Dict ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : Any ):
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ):
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
    def create_and_check_for_image_classification( self ,config ,pixel_values ,labels ):
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_backbone( self ,config ,pixel_values ,labels ):
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
        self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,1 )
        self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels( self : str ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values, 'labels': labels}
        return config, inputs_dict
@require_torch
class __magic_name__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
lowerCAmelCase : int = False
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[str] = False
lowerCAmelCase : Optional[int] = False
def __lowercase ( self : List[Any] ):
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=ConvNextVaConfig ,has_text_modality=False ,hidden_size=37 )
def __lowercase ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self : str ):
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __lowercase ( self : List[Any] ):
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __lowercase ( self : Any ):
pass
def __lowercase ( self : List[str] ):
if not self.model_tester.is_training:
return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def __lowercase ( self : str ):
if not self.model_tester.is_training:
return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def __lowercase ( self : List[Any] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
def __lowercase ( self : int ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __lowercase ( self : Any ):
        def check_hidden_states_output(inputs_dict ,config ,model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) ,expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
def __lowercase ( self : List[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __lowercase ( self : int ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> List[Any]:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self : Optional[Any] ):
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def __lowercase ( self : Any ):
        model = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image ,return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4 ) )
| 89 | 1 |
'''simple docstring'''
import re
def indian_phone_validator(phone ) -> bool:
    pat = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
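# Pattern breakdown: an optional '+91' (with an optional '-' or space after it), an optional
# leading '0' or '91' trunk prefix, then a mobile number whose first digit is 7, 8 or 9
# followed by exactly nine more digits.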
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_lilt'''] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_jukebox'''] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 |
'''simple docstring'''
import math
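# Project Euler 207: a partition 4**t = 2**t + k is 'perfect' when t is an integer.
# Substituting x = 2**t turns 4**t = 2**t + k into x**2 - x - k = 0, whose positive root is
# x = (1 + sqrt(4 * k + 1)) / 2, so t = log2(sqrt(4 * k + 1) / 2 + 1 / 2) as checked below.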
def check_partition_perfect(positive_integer ) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution(max_proportion = 1 / 12345 ) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate )
        integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 89 | 1 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def handle_test_results(test_results ) -> Tuple:
    expressions = test_results.split(' ' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
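# e.g. handle_test_results('2 failed, 3 passed in 10.5s') returns (2, 3, '10.5s')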
def extract_first_line_failure(failures_short_lines ) -> str:
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n' ):
        if re.search(r'_ \[doctest\]' , line ):
            in_error = True
            file = line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class __magic_name__ :
    def __init__( self ,title ,doc_test_results ):
        self.title = title
        self._time_spent = doc_test_results['time_spent'].split(',' )[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None
@property
    def time( self : Tuple ):
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(':' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F"""{int(hours )}h{int(minutes )}m{int(seconds )}s"""
@property
    def header( self : int ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self : Optional[int] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
    def failures( self : Tuple ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
    def category_failures( self : Optional[int] ):
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v ,dict )}
        report = ''
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
    def payload( self : List[Any] ):
        blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_UpperCAmelCase )
@staticmethod
    def error_out( ):
        payload = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
]
print('Sending the following payload' )
        print(json.dumps({'blocks': payload} ) )
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,text='There was an issue running the tests.' ,blocks=payload ,)
    def post( self : str ):
        print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(self.payload )} ) )
        text = F"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else 'All tests passed.'
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,blocks=self.payload ,text=text ,)
    def get_reply_blocks( self ,job_name ,job_link ,failures ,text ):
        failures_text = ''
        for key, value in failures.items():
            value = value[:200] + ' [Truncated]' if len(value ) > 250 else value
            failures_text += F"""*{key}*\n_{value}_\n\n"""
        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply( self : List[Any] ):
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.' )
        job_link = self.doc_test_results.pop('job_link' )
        self.doc_test_results.pop('failures' )
        self.doc_test_results.pop('success' )
        self.doc_test_results.pop('time_spent' )
        sorted_dict = sorted(self.doc_test_results.items() ,key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['failures'] ):
                text = F"""*Num failures* :{len(job_result['failed'] )} \n"""
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job ,job_link ,failures ,text=text )
                print('Sending the following reply' )
                print(json.dumps({'blocks': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,text=F"""Results for {job}""" ,blocks=blocks ,thread_ts=self.thread_ts['ts'] ,)
                time.sleep(1 )
def get_job_links() -> Optional[Any]:
    run_id = os.environ['GITHUB_RUN_ID']
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"""&page={i + 2}""" ).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.' , e )
        return {}
def retrieve_artifact(name ) -> Optional[int]:
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding='utf-8' ) as f:
                    _artifact[file.split('.' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"""Could not open {os.path.join(name , file )}.""" ) from e
    return _artifact
def retrieve_available_artifacts() -> Optional[int]:
    class Artifact :
        def __init__( self ,name ):
            self.name = name
            self.paths = []
        def __str__( self ):
            return self.name
        def add_path( self ,path ):
            self.paths.append({'name': self.name, 'path': path} )
    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ('''*.py''', '''API Examples'''),
            ('''*.md''', '''MD Examples'''),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            '''failed''': [],
            '''failures''': {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results['''job_link'''] = github_actions_job_links.get('''run_doctests''')
    artifact_path = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
    artifact = retrieve_artifact(artifact_path['''name'''])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact['''stats'''])
        doc_test_results['''failures'''] = failed
        doc_test_results['''success'''] = success
        doc_test_results['''time_spent'''] = time_spent[1:-1] + ''', '''
        all_failures = extract_first_line_failure(artifact['''failures_short'''])
        for line in artifact["summary_short"].split('''\n'''):
            if re.search('''FAILED''', line):
                line = line.replace('''FAILED ''', '''''')
                line = line.split()[0].replace('''\n''', '''''')
                if "::" in line:
                    file_path, test = line.split('''::''')
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else '''N/A'''
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message('''🤗 Results of the doc tests.''', doc_test_results)
    message.post()
    message.post_reply()
| 89 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path , n_shave_prefix_segments=1 ) -> Dict:
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('.' )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list , n_shave_prefix_segments=0 ) -> Tuple:
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0' , 'norm1' )
        new_item = new_item.replace('in_layers.2' , 'conv1' )
        new_item = new_item.replace('out_layers.0' , 'norm2' )
        new_item = new_item.replace('out_layers.3' , 'conv2' )
        new_item = new_item.replace('emb_layers.1' , 'time_emb_proj' )
        new_item = new_item.replace('skip_connection' , 'conv_shortcut' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'old': old_item, 'new': new_item} )
    return mapping
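# e.g. renew_resnet_paths(['input_blocks.1.0.in_layers.2.weight']) yields
# [{'old': 'input_blocks.1.0.in_layers.2.weight', 'new': 'input_blocks.1.0.conv1.weight'}]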
def renew_attention_paths(old_list , n_shave_prefix_segments=0 ) -> Any:
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight' , 'group_norm.weight' )
        new_item = new_item.replace('norm.bias' , 'group_norm.bias' )
        new_item = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
        new_item = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'old': old_item, 'new': new_item} )
    return mapping
def assign_to_checkpoint(paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ) -> Any:
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map['query']] = query.reshape(target_shape )
            checkpoint[path_map['key']] = key.reshape(target_shape )
            checkpoint[path_map['value']] = value.reshape(target_shape )
    for path in paths:
        new_path = path['new']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
        new_path = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
        new_path = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'] , replacement['new'] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
def convert_ldm_checkpoint(checkpoint , config ) -> List[Any]:
    new_checkpoint = {}
    # the target names below follow the standard LDM -> diffusers key renaming for UNet checkpoints
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
    num_input_blocks = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
        for layer_id in range(num_output_blocks )
    }
for i in range(1 , lowerCAmelCase_ ):
_a : List[Any] = (i - 1) // (config['num_res_blocks'] + 1)
_a : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1)
_a : Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
_a : List[Any] = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
_a : Union[str, Any] = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
_a : List[str] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
_a : Optional[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path, resnet_op] , config=lowerCAmelCase_ )
if len(lowerCAmelCase_ ):
_a : List[str] = renew_attention_paths(lowerCAmelCase_ )
_a : List[Any] = {
'old': f"""input_blocks.{i}.1""",
'new': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : Optional[Any] = {
f"""input_blocks.{i}.1.qkv.bias""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ , )
_a : str = middle_blocks[0]
_a : Tuple = middle_blocks[1]
_a : Any = middle_blocks[2]
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : int = renew_attention_paths(lowerCAmelCase_ )
_a : int = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ )
for i in range(lowerCAmelCase_ ):
_a : List[str] = i // (config['num_res_blocks'] + 1)
_a : Any = i % (config['num_res_blocks'] + 1)
_a : Union[str, Any] = [shave_segments(lowerCAmelCase_ , 2 ) for name in output_blocks[i]]
_a : Optional[Any] = {}
for layer in output_block_layers:
_a , _a : str = layer.split('.' )[0], shave_segments(lowerCAmelCase_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(lowerCAmelCase_ )
else:
_a : str = [layer_name]
if len(lowerCAmelCase_ ) > 1:
_a : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
_a : Dict = renew_resnet_paths(lowerCAmelCase_ )
_a : str = renew_resnet_paths(lowerCAmelCase_ )
_a : Optional[int] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , config=lowerCAmelCase_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_a : List[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
_a : Tuple = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
_a : List[str] = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear the attentions, as they have already been assigned above.
if len(lowerCAmelCase_ ) == 2:
_a : Union[str, Any] = []
if len(lowerCAmelCase_ ):
_a : Tuple = renew_attention_paths(lowerCAmelCase_ )
_a : str = {
'old': f"""output_blocks.{i}.1""",
'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : List[Any] = {
f"""output_blocks.{i}.1.qkv.bias""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=lowerCAmelCase_ , )
else:
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_a : int = '.'.join(['output_blocks', str(lowerCAmelCase_ ), path['old']] )
_a : Union[str, Any] = '.'.join(['up_blocks', str(lowerCAmelCase_ ), 'resnets', str(lowerCAmelCase_ ), path['new']] )
_a : Union[str, Any] = checkpoint[old_path]
return new_checkpoint
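# Example of the overall renaming performed above (illustrative key pair, not
# taken from a real checkpoint): a key such as 'input_blocks.1.0.in_layers.0.weight'
# ends up as 'down_blocks.0.resnets.0.norm1.weight' in the converted state dict.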
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__lowerCAmelCase = json.loads(f.read())
__lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__lowerCAmelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 89 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class __magic_name__ :
def __init__( self : Dict ):
_a : list[Any] = []
_a : int = 0
_a : int = 0
def __lowercase ( self : Optional[Any] ):
return self.head == self.tail
def __lowercase ( self : List[Any] ,_UpperCAmelCase : Any ):
self.data.append(_UpperCAmelCase )
_a : Dict = self.tail + 1
def __lowercase ( self : int ):
_a : str = self.data[self.head]
_a : List[Any] = self.head + 1
return ret
def __lowercase ( self : Dict ):
return self.tail - self.head
def __lowercase ( self : List[Any] ):
print(self.data )
print('**************' )
print(self.data[self.head : self.tail] )
class __magic_name__ :
def __init__( self : List[str] ,_UpperCAmelCase : Any ):
_a : Dict = data
_a : MyNode | None = None
_a : MyNode | None = None
_a : int = 1
def __lowercase ( self : Dict ):
return self.data
def __lowercase ( self : Tuple ):
return self.left
def __lowercase ( self : Optional[Any] ):
return self.right
def __lowercase ( self : Optional[Any] ):
return self.height
def __lowercase ( self : str ,_UpperCAmelCase : Any ):
_a : str = data
def __lowercase ( self : str ,_UpperCAmelCase : MyNode | None ):
_a : Tuple = node
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : MyNode | None ):
_a : Tuple = node
def __lowercase ( self : Dict ,_UpperCAmelCase : int ):
_a : Dict = height
def __lowerCamelCase ( lowerCAmelCase_ ) -> int:
if node is None:
return 0
return node.get_height()
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
if a > b:
return a
return b
def __lowerCamelCase ( lowerCAmelCase_ ) -> MyNode:
print('left rotation node:' , node.get_data() )
_a : str = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowerCAmelCase_ )
_a : Any = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase_ )
_a : str = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCAmelCase_ )
return ret
def __lowerCamelCase ( lowerCAmelCase_ ) -> MyNode:
print('right rotation node:' , node.get_data() )
_a : Optional[Any] = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowerCAmelCase_ )
_a : Tuple = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase_ )
_a : List[str] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCAmelCase_ )
return ret
def __lowerCamelCase ( lowerCAmelCase_ ) -> MyNode:
_a : Union[str, Any] = node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowerCAmelCase_ ) )
return right_rotation(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ ) -> MyNode:
_a : str = node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowerCAmelCase_ ) )
return left_rotation(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> MyNode | None:
if node is None:
return MyNode(lowerCAmelCase_ )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowerCAmelCase_ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
_a : Any = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
_a : Union[str, Any] = right_rotation(lowerCAmelCase_ )
else:
_a : List[str] = lr_rotation(lowerCAmelCase_ )
else:
node.set_right(insert_node(node.get_right() , lowerCAmelCase_ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
_a : str = node.get_right()
assert right_child is not None
if data < right_child.get_data():
_a : Any = rl_rotation(lowerCAmelCase_ )
else:
_a : str = left_rotation(lowerCAmelCase_ )
_a : str = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase_ )
return node
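# Worked example (illustrative): inserting 3, 2, 1 in that order leaves node 3
# left-heavy with a left-leaning left child, so the branch above performs a
# single right rotation around 3 and node 2 becomes the new subtree root.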
def __lowerCamelCase ( lowerCAmelCase_ ) -> Any:
while True:
_a : Tuple = root.get_right()
if right_child is None:
break
_a : Any = right_child
return root.get_data()
def __lowerCamelCase ( lowerCAmelCase_ ) -> Any:
while True:
_a : Any = root.get_left()
if left_child is None:
break
_a : Optional[int] = left_child
return root.get_data()
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> MyNode | None:
_a : Optional[int] = root.get_left()
_a : Optional[int] = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
_a : int = get_left_most(lowerCAmelCase_ )
root.set_data(lowerCAmelCase_ )
root.set_right(del_node(lowerCAmelCase_ , lowerCAmelCase_ ) )
elif left_child is not None:
_a : Optional[int] = left_child
elif right_child is not None:
_a : Optional[int] = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('No such data' )
return root
else:
root.set_left(del_node(lowerCAmelCase_ , lowerCAmelCase_ ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowerCAmelCase_ , lowerCAmelCase_ ) )
if get_height(lowerCAmelCase_ ) - get_height(lowerCAmelCase_ ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
_a : Union[str, Any] = left_rotation(lowerCAmelCase_ )
else:
_a : Dict = rl_rotation(lowerCAmelCase_ )
elif get_height(lowerCAmelCase_ ) - get_height(lowerCAmelCase_ ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
_a : Any = right_rotation(lowerCAmelCase_ )
else:
_a : int = lr_rotation(lowerCAmelCase_ )
_a : Tuple = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowerCAmelCase_ )
return root
class __magic_name__ :
def __init__( self : Tuple ):
_a : MyNode | None = None
def __lowercase ( self : List[Any] ):
return get_height(self.root )
def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ):
print('insert:' + str(_UpperCAmelCase ) )
_a : List[Any] = insert_node(self.root ,_UpperCAmelCase )
def __lowercase ( self : Any ,_UpperCAmelCase : Any ):
print('delete:' + str(_UpperCAmelCase ) )
if self.root is None:
print('Tree is empty!' )
return
_a : Union[str, Any] = del_node(self.root ,_UpperCAmelCase )
def __str__( self : int ,): # a level-order traversal gives a more intuitive view of the tree
_a : Optional[Any] = ''
_a : Tuple = MyQueue()
q.push(self.root )
_a : Dict = self.get_height()
if layer == 0:
return output
_a : int = 0
while not q.is_empty():
_a : List[str] = q.pop()
_a : Optional[int] = ' ' * int(math.pow(2 ,layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(_UpperCAmelCase )
q.push(_UpperCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
_a : Dict = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 ,_UpperCAmelCase ) - 1:
_a : List[Any] = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __lowerCamelCase ( ) -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
__lowerCAmelCase = AVLtree()
__lowerCAmelCase = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 89 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> np.array:
_a : Optional[int] = f"""{sampling_rate}"""
_a : Any = '1'
_a : Optional[int] = 'f32le'
_a : Any = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(lowerCAmelCase_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
_a : int = ffmpeg_process.communicate(lowerCAmelCase_ )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
_a : int = output_stream[0]
_a : List[str] = np.frombuffer(lowerCAmelCase_ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
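# Minimal usage sketch (hedged): upstream this decoder is conventionally called
# ffmpeg_read; with ffmpeg installed, something like
#     with open('sample.wav', 'rb') as f:
#         audio = ffmpeg_read(f.read(), 16_000)
# returns a float32 numpy array resampled to 16 kHz. The definition above is
# name-mangled, so the name here is illustrative only.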
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = "f32le" , ) -> Union[str, Any]:
_a : List[str] = f"""{sampling_rate}"""
_a : List[str] = '1'
if format_for_conversion == "s16le":
_a : List[Any] = 2
elif format_for_conversion == "f32le":
_a : Dict = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Any = platform.system()
if system == "Linux":
_a : Union[str, Any] = 'alsa'
_a : Union[str, Any] = 'default'
elif system == "Darwin":
_a : Any = 'avfoundation'
_a : Optional[int] = ':0'
elif system == "Windows":
_a : str = 'dshow'
_a : Tuple = 'default'
_a : str = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_a : Union[str, Any] = _ffmpeg_stream(lowerCAmelCase_ , lowerCAmelCase_ )
for item in iterator:
yield item
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "f32le" , ) -> str:
if stream_chunk_s is not None:
_a : str = stream_chunk_s
else:
_a : List[str] = chunk_length_s
_a : int = ffmpeg_microphone(lowerCAmelCase_ , lowerCAmelCase_ , format_for_conversion=lowerCAmelCase_ )
if format_for_conversion == "s16le":
_a : Optional[Any] = np.intaa
_a : List[Any] = 2
elif format_for_conversion == "f32le":
_a : Tuple = np.floataa
_a : Any = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_a : str = chunk_length_s / 6
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCAmelCase_ , (int, float) ):
_a : List[str] = [stride_length_s, stride_length_s]
_a : str = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_a : List[str] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_a : Any = datetime.datetime.now()
_a : Dict = datetime.timedelta(seconds=lowerCAmelCase_ )
for item in chunk_bytes_iter(lowerCAmelCase_ , lowerCAmelCase_ , stride=(stride_left, stride_right) , stream=lowerCAmelCase_ ):
# Convert the raw bytes back into a numpy array of audio samples
_a : List[Any] = np.frombuffer(item['raw'] , dtype=lowerCAmelCase_ )
_a : List[str] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
_a : Union[str, Any] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We are more than 10 chunk-lengths behind real time; skip this chunk
continue
yield item
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False ) -> List[Any]:
_a : Tuple = B''
_a , _a : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
_a : Optional[int] = 0
for raw in iterator:
acc += raw
if stream and len(lowerCAmelCase_ ) < chunk_len:
_a : str = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCAmelCase_ ) >= chunk_len:
# We are flushing the accumulator
_a : Union[str, Any] = (_stride_left, stride_right)
_a : Dict = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
_a : List[str] = False
yield item
_a : int = stride_left
_a : List[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCAmelCase_ ) > stride_left:
_a : str = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
_a : str = False
yield item
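# Worked example of the overlap rule above (illustrative, standalone sketch that
# ignores the per-chunk stride metadata of the real generator):
def _toy_chunks(data: bytes, chunk_len: int, stride: tuple) -> list:
    # Each emitted window keeps stride[0]/stride[1] bytes of edge context, and
    # the accumulator rewinds by stride[0] + stride[1] between windows.
    out, acc = [], data
    while len(acc) >= chunk_len:
        out.append(acc[:chunk_len])
        acc = acc[chunk_len - stride[0] - stride[1] :]
    return out
# _toy_chunks(b'abcdefgh', 6, (2, 2)) == [b'abcdef', b'cdefgh']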
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_a : Optional[Any] = 2**24 # 16 MB
try:
with subprocess.Popen(lowerCAmelCase_ , stdout=subprocess.PIPE , bufsize=lowerCAmelCase_ ) as ffmpeg_process:
while True:
_a : Any = ffmpeg_process.stdout.read(lowerCAmelCase_ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 89 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Union[str, Any] = 'fnet'
def __init__( self : Optional[int] ,_UpperCAmelCase : Tuple=32000 ,_UpperCAmelCase : Tuple=768 ,_UpperCAmelCase : Dict=12 ,_UpperCAmelCase : str=3072 ,_UpperCAmelCase : int="gelu_new" ,_UpperCAmelCase : List[Any]=0.1 ,_UpperCAmelCase : Dict=512 ,_UpperCAmelCase : Optional[Any]=4 ,_UpperCAmelCase : Dict=0.02 ,_UpperCAmelCase : int=1E-12 ,_UpperCAmelCase : Union[str, Any]=False ,_UpperCAmelCase : Union[str, Any]=512 ,_UpperCAmelCase : List[str]=3 ,_UpperCAmelCase : str=1 ,_UpperCAmelCase : Union[str, Any]=2 ,**_UpperCAmelCase : int ,):
super().__init__(pad_token_id=_UpperCAmelCase ,bos_token_id=_UpperCAmelCase ,eos_token_id=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Tuple = vocab_size
_a : str = max_position_embeddings
_a : Union[str, Any] = hidden_size
_a : Any = num_hidden_layers
_a : str = intermediate_size
_a : str = hidden_act
_a : Union[str, Any] = hidden_dropout_prob
_a : Any = initializer_range
_a : List[Any] = type_vocab_size
_a : List[str] = layer_norm_eps
_a : int = use_tpu_fourier_optimizations
_a : Dict = tpu_short_seq_length
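# Minimal usage sketch (hedged: the class above is name-mangled, and FNetConfig
# is its conventional upstream name):
#     config = FNetConfig(vocab_size=1_000, hidden_size=64)
#     assert config.use_tpu_fourier_optimizations is False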
| 89 |
'''simple docstring'''
__lowerCAmelCase = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : List[Any] = set()
# keep track of all the paths to be checked
_a : Any = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
_a : Tuple = queue.pop(0 )
# get the last node from the path
_a : Tuple = path[-1]
if node not in explored:
_a : Optional[Any] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
_a : Any = list(lowerCAmelCase_ )
new_path.append(lowerCAmelCase_ )
queue.append(lowerCAmelCase_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(lowerCAmelCase_ )
# in case there's no path between the 2 nodes
return []
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
_a : Optional[int] = [start]
_a : Dict = set(lowerCAmelCase_ )
# Keep tabs on distances from the `start` node.
_a : Dict = {start: 0, target: -1}
while queue:
_a : List[str] = queue.pop(0 )
if node == target:
_a : Any = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(lowerCAmelCase_ )
queue.append(lowerCAmelCase_ )
_a : Any = dist[node] + 1
return dist[target]
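# Complexity note: bfs_shortest_path_distance is a textbook BFS, O(V + E) time
# with O(V) extra space; bfs_shortest_path copies the partial path for every
# expanded neighbour, so it pays extra memory to return the route itself.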
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 89 | 1 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : str ,_UpperCAmelCase : Path ,_UpperCAmelCase : Union[str, None] = None ,_UpperCAmelCase : Union[List[str], None] = None ,_UpperCAmelCase : Union[str, List[str], None] = None ,_UpperCAmelCase : bool = True ,):
_a : Dict = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )]
if identifier is not None:
_a : str = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
for n_ in n_identifier:
_a : int = [file for file in files if n_ not in file]
else:
_a : Optional[Any] = [file for file in files if n_identifier not in file]
_a : Dict = ignore_files or []
ignore_files.append('__init__.py' )
_a : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' ,_UpperCAmelCase )
if only_modules:
_a : Any = file.split('.' )[0]
try:
_a : Optional[int] = getattr(_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict = doctest.DocTestSuite(_UpperCAmelCase )
_a : Optional[int] = unittest.TextTestRunner().run(_UpperCAmelCase )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
_a : str = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def __lowercase ( self : Union[str, Any] ):
_a : Optional[Any] = Path('src/transformers' )
_a : Optional[Any] = 'modeling'
_a : Union[str, Any] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase ,ignore_files=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : str = Path('src/transformers' )
_a : List[str] = 'tokenization'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Any = Path('src/transformers' )
_a : str = 'configuration'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : Tuple = Path('src/transformers' )
_a : Optional[int] = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(_UpperCAmelCase ,n_identifier=_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
_a : Union[str, Any] = Path('docs/source' )
_a : List[str] = ['favicon.ico']
self.analyze_directory(_UpperCAmelCase ,ignore_files=_UpperCAmelCase ,only_modules=_UpperCAmelCase )
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
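# Effect of the lazy structure above (illustrative): importing this package is
# cheap, and the torch/TF submodules are only loaded when an exported name such
# as SwinModel is first accessed on the module.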
| 89 | 1 |
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
# ===== initialization =====
_a : Any = Mock()
_a : Tuple = conn, Mock()
_a : Dict = iter([1, None] )
_a : List[Any] = lambda lowerCAmelCase_ : next(lowerCAmelCase_ )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=lowerCAmelCase_ )
# ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 89 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = BarthezTokenizer
lowerCAmelCase : int = BarthezTokenizerFast
lowerCAmelCase : Dict = True
lowerCAmelCase : str = True
def __lowercase ( self : List[Any] ):
super().setUp()
_a : List[Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname ,legacy_format=_UpperCAmelCase )
_a : Union[str, Any] = tokenizer
def __lowercase ( self : Tuple ):
_a : Optional[Any] = '<pad>'
_a : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) ,_UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) ,_UpperCAmelCase )
def __lowercase ( self : str ):
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'<mask>' )
self.assertEqual(len(_UpperCAmelCase ) ,101122 )
def __lowercase ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size ,101122 )
@require_torch
def __lowercase ( self : Dict ):
_a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : Dict = [0, 57, 3018, 70307, 91, 2]
_a : Dict = self.tokenizer(
_UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual((2, 6) ,batch.input_ids.shape )
self.assertEqual((2, 6) ,batch.attention_mask.shape )
_a : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
_a : str = self.get_tokenizer()
_a : List[str] = self.get_rust_tokenizer()
_a : Dict = 'I was born in 92000, and this is falsé.'
_a : List[Any] = tokenizer.tokenize(_UpperCAmelCase )
_a : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Union[str, Any] = self.get_rust_tokenizer()
_a : Any = tokenizer.encode(_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
@slow
def __lowercase ( self : Optional[int] ):
# fmt: off
_a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
_a : Optional[Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
| 89 | 1 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( lowerCAmelCase_ ) -> int:
if not nums:
return 0
_a : Optional[Any] = nums[0]
_a : int = 0
for num in nums[1:]:
_a , _a : str = (
max_excluding + num,
max(lowerCAmelCase_ , lowerCAmelCase_ ),
)
return max(lowerCAmelCase_ , lowerCAmelCase_ )
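# Illustrative traces of the house-robber recurrence above (not doctests in the
# original, which is why doctest.testmod() below has nothing to check):
#     [1, 2, 3]             -> 4   (pick 1 and 3)
#     [1, 5, 3, 7, 2, 2, 6] -> 18  (pick 5, 7 and 6)
#     [-1, -2]              -> 0   (the running max starts from the empty pick)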
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( _UpperCamelCase ):
@require_torch
def __lowercase ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it is too late to change it from inside pytest - so
# we change it while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Optional[int] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : List[str] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Tuple = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : List[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : Any ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Dict = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : Optional[int] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : int = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : str = self.get_env()
_a : Optional[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it is too late to change it from inside pytest - so
# we change it while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_a : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_a : str = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_a : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Dict = self.get_env()
_a : int = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
_a : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : int ):
_a : Optional[Any] = '\nfrom transformers import pipeline\n '
_a : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_a : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_a : List[Any] = self.get_env()
_a : Dict = '1'
_a : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_a : str = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def __lowercase ( self : int ):
_a : Optional[int] = '\nfrom transformers import AutoModel\n '
_a : List[Any] = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_a : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Tuple = self.get_env()
_a : List[str] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : Optional[Any] = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
| 89 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['''BeitFeatureExtractor''']
__lowerCAmelCase = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 |
'''simple docstring'''
def __lowerCamelCase ( ) -> Tuple:
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def __lowerCamelCase ( lowerCAmelCase_ ) -> List[Any]:
_a : Any = 1
_a : Tuple = 2
while i * i <= n:
_a : Tuple = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def __lowerCamelCase ( ) -> str:
return next(i for i in triangle_number_generator() if count_divisors(lowerCAmelCase_ ) > 500 )
if __name__ == "__main__":
print(solution())
| 89 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Any = StableDiffusionInstructPixaPixPipeline
lowerCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowerCAmelCase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCAmelCase : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self : List[str] ):
torch.manual_seed(0 )
_a : Any = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=8 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_a : Tuple = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
torch.manual_seed(0 )
_a : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
_a : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_a : str = CLIPTextModel(_UpperCAmelCase )
_a : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_a : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
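# The components above deliberately wire a tiny UNet/VAE/CLIP stack (32-64
# channel blocks, 32-dim text embeddings) so the pipeline tests below run on
# CPU in seconds; the sizes are test fixtures, not real model dimensions.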
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : str=0 ):
_a : Dict = floats_tensor((1, 3, 32, 32) ,rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
_a : Any = image.cpu().permute(0 ,2 ,3 ,1 )[0]
_a : List[Any] = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('RGB' )
if str(_UpperCAmelCase ).startswith('mps' ):
_a : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
_a : Union[str, Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_a : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self : str ):
_a : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a : int = self.get_dummy_components()
_a : int = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
_a : Tuple = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : List[str] = self.get_dummy_inputs(_UpperCAmelCase )
_a : Dict = sd_pipe(**_UpperCAmelCase ).images
_a : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : Dict = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowercase ( self : Any ):
_a : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a : Optional[int] = self.get_dummy_components()
_a : Any = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
_a : List[Any] = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : int = self.get_dummy_inputs(_UpperCAmelCase )
_a : Union[str, Any] = 'french fries'
_a : Union[str, Any] = sd_pipe(**_UpperCAmelCase ,negative_prompt=_UpperCAmelCase )
_a : Dict = output.images
_a : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : Dict = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowercase ( self : Union[str, Any] ):
_a : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a : Union[str, Any] = self.get_dummy_components()
_a : List[str] = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
_a : str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : Dict = self.get_dummy_inputs(_UpperCAmelCase )
_a : Optional[Any] = [inputs['prompt']] * 2
_a : Dict = np.array(inputs['image'] ).astype(np.floataa ) / 2_55.0
_a : Tuple = torch.from_numpy(_UpperCAmelCase ).unsqueeze(0 ).to(_UpperCAmelCase )
_a : int = image / 2 + 0.5
_a : List[Any] = image.permute(0 ,3 ,1 ,2 )
_a : Optional[Any] = image.repeat(2 ,1 ,1 ,1 )
_a : List[Any] = sd_pipe(**_UpperCAmelCase ).images
_a : Optional[Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
_a : int = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowercase ( self : Union[str, Any] ):
_a : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a : Optional[Any] = self.get_dummy_components()
_a : Optional[int] = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' )
_a : Dict = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
_a : Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : Optional[Any] = self.get_dummy_inputs(_UpperCAmelCase )
_a : List[str] = sd_pipe(**_UpperCAmelCase ).images
_a : List[str] = image[0, -3:, -3:, -1]
_a : Tuple = [round(_UpperCAmelCase ,4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(_UpperCAmelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
_a : Tuple = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowercase ( self : Optional[int] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowercase ( self : List[Any] ):
_a : int = self.get_dummy_components()
_a : Dict = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
_a : Optional[Any] = VaeImageProcessor(do_resize=_UpperCAmelCase ,do_normalize=_UpperCAmelCase )
_a : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : List[Any] = pipe(**self.get_dummy_inputs_by_type(_UpperCAmelCase ,input_image_type='pt' ) )[0]
_a : List[Any] = components['vae']
_a : List[Any] = self.get_dummy_inputs_by_type(_UpperCAmelCase ,input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_a : int = vae.encode(inputs[image_param] ).latent_dist.mode()
_a : Tuple = pipe(**_UpperCAmelCase )[0]
_a : Optional[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(_UpperCAmelCase ,1E-4 ,'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : int ,_UpperCAmelCase : Dict=0 ):
_a : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
_a : Optional[int] = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
_a : List[Any] = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self : List[str] ):
_a : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
_a : Tuple = self.get_inputs()
_a : List[str] = pipe(**_UpperCAmelCase ).images
_a : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_a : Tuple = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowercase ( self : Union[str, Any] ):
_a : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=_UpperCAmelCase )
_a : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
_a : Tuple = self.get_inputs()
_a : Optional[int] = pipe(**_UpperCAmelCase ).images
_a : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_a : Optional[Any] = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowercase ( self : List[Any] ):
_a : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=_UpperCAmelCase )
_a : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
_a : List[Any] = self.get_inputs()
_a : str = pipe(**_UpperCAmelCase ).images
_a : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_a : Optional[Any] = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowercase ( self : Optional[int] ):
_a : List[Any] = 0
def callback_fn(_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : torch.FloatTensor ) -> None:
_a : Dict = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_a : str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_a : List[Any] = latents[0, -3:, -3:, -1]
_a : Union[str, Any] = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
_a : Optional[int] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_a : Tuple = latents[0, -3:, -3:, -1]
_a : Tuple = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
_a : int = False
_a : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=_UpperCAmelCase ,torch_dtype=torch.floataa )
_a : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
_a : Optional[int] = self.get_inputs()
pipe(**_UpperCAmelCase ,callback=_UpperCAmelCase ,callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __lowercase ( self : List[str] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_a : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' ,safety_checker=_UpperCAmelCase ,torch_dtype=torch.floataa )
_a : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_a : Any = self.get_inputs()
_a : Dict = pipe(**_UpperCAmelCase )
_a : Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __lowercase ( self : str ):
_a : Any = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_a : Any = inputs['image'].resize((504, 504) )
_a : int = 'timbrooks/instruct-pix2pix'
_a : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_UpperCAmelCase ,safety_checker=_UpperCAmelCase ,)
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
_a : Optional[int] = pipe(**_UpperCAmelCase )
_a : Any = output.images[0]
_a : Any = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
_a : int = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 89 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : Optional[int] ,_UpperCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,**_UpperCAmelCase : Dict ,):
super().__init__(features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Tuple = Sql(
cache_dir=_UpperCAmelCase ,features=_UpperCAmelCase ,sql=_UpperCAmelCase ,con=_UpperCAmelCase ,**_UpperCAmelCase ,)
def __lowercase ( self : Dict ):
_a : Optional[Any] = None
_a : Dict = None
_a : Dict = None
_a : Optional[int] = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase ,download_mode=_UpperCAmelCase ,verification_mode=_UpperCAmelCase ,base_path=_UpperCAmelCase ,)
# Build dataset for splits
_a : List[str] = self.builder.as_dataset(
split='train' ,verification_mode=_UpperCAmelCase ,in_memory=self.keep_in_memory )
return dataset
class __magic_name__ :
def __init__( self : Optional[int] ,_UpperCAmelCase : Dataset ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Dict ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_a : Dict = dataset
_a : List[Any] = name
_a : Tuple = con
_a : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_a : List[Any] = num_proc
_a : Tuple = to_sql_kwargs
def __lowercase ( self : List[Any] ):
_a : Tuple = self.to_sql_kwargs.pop('sql' ,_UpperCAmelCase )
_a : str = self.to_sql_kwargs.pop('con' ,_UpperCAmelCase )
_a : Optional[Any] = self.to_sql_kwargs.pop('index' ,_UpperCAmelCase )
_a : Any = self._write(index=_UpperCAmelCase ,**self.to_sql_kwargs )
return written
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Dict ):
_a , _a , _a : Any = args
_a : Tuple = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
_a : Dict = query_table(
table=self.dataset.data ,key=slice(_UpperCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
_a : Tuple = batch.to_pandas()
_a : Dict = df.to_sql(self.name ,self.con ,index=_UpperCAmelCase ,**_UpperCAmelCase )
return num_rows or len(_UpperCAmelCase )
def __lowercase ( self : int ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : List[Any] ):
_a : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_a , _a : List[Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,_UpperCAmelCase ,_UpperCAmelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += num_rows
return written
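# Hedged usage sketch (not part of the module above): the public entry points that
# wrap this reader/writer pair are assumed to be `Dataset.from_sql` and
# `Dataset.to_sql`, so a SQLite round trip could look like the function below.
def _sql_round_trip_example():
    import sqlite3

    from datasets import Dataset

    ds = Dataset.from_dict({'id': [1, 2, 3], 'text': ['a', 'b', 'c']})
    con = sqlite3.connect(':memory:')
    ds.to_sql('demo', con)  # dispatches to the SQL dataset writer above
    restored = Dataset.from_sql('SELECT id, text FROM demo', con)  # the reader above
    assert restored.num_rows == ds.num_rows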
| 89 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __magic_name__ ( unittest.TestCase ):
@property
def __lowercase ( self : Optional[int] ):
torch.manual_seed(0 )
_a : Dict = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def __lowercase ( self : Optional[int] ):
_a : Tuple = self.dummy_uncond_unet
_a : Any = KarrasVeScheduler()
_a : int = KarrasVePipeline(unet=_UpperCAmelCase ,scheduler=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : int = torch.manual_seed(0 )
_a : Optional[int] = pipe(num_inference_steps=2 ,generator=_UpperCAmelCase ,output_type='numpy' ).images
_a : Any = torch.manual_seed(0 )
_a : int = pipe(num_inference_steps=2 ,generator=_UpperCAmelCase ,output_type='numpy' ,return_dict=_UpperCAmelCase )[0]
_a : Dict = image[0, -3:, -3:, -1]
_a : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : List[str] ):
_a : Union[str, Any] = 'google/ncsnpp-celebahq-256'
_a : List[Any] = UNetaDModel.from_pretrained(_UpperCAmelCase )
_a : List[str] = KarrasVeScheduler()
_a : str = KarrasVePipeline(unet=_UpperCAmelCase ,scheduler=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : List[str] = torch.manual_seed(0 )
_a : List[str] = pipe(num_inference_steps=20 ,generator=_UpperCAmelCase ,output_type='numpy' ).images
_a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_a : Any = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
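# The slice-comparison idiom used in both tests above, isolated as a helper (a
# sketch, not part of the original file): take a small corner of the output and
# compare it against a stored reference within a tolerance, so numerical
# regressions show up as drift on a fixed seed.
def assert_corner_slice_close(image, expected_slice, atol=1E-2):
    image_slice = image[0, -3:, -3:, -1].flatten()
    assert np.abs(image_slice - expected_slice).max() < atol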
| 89 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> np.ndarray:
_a : Union[str, Any] = cva.getAffineTransform(lowerCAmelCase_ , lowerCAmelCase_ )
return cva.warpAffine(lowerCAmelCase_ , lowerCAmelCase_ , (rows, cols) )
if __name__ == "__main__":
# read original image
__lowerCAmelCase = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
__lowerCAmelCase = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__lowerCAmelCase , __lowerCAmelCase = gray_img.shape
# set different points to rotate image
__lowerCAmelCase = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
__lowerCAmelCase = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
__lowerCAmelCase = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
__lowerCAmelCase = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
__lowerCAmelCase = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__lowerCAmelCase = plt.figure(1)
__lowerCAmelCase = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
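    # What cva.getAffineTransform computes, sketched in plain NumPy: the unique 2x3
    # matrix mapping each of three source points [x, y, 1] to its destination.
    # The variable names below are illustrative additions, not part of the script.
    src_pts = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    dst_pts = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    homogeneous = np.hstack([src_pts, np.ones((3, 1), np.float32)])  # rows [x, y, 1]
    affine_matrix = np.linalg.solve(homogeneous, dst_pts).T  # 2x3, as getAffineTransform
    assert np.allclose(homogeneous @ affine_matrix.T, dst_pts, atol=1E-4)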
| 89 | 1 |
'''simple docstring'''
def __lowerCamelCase ( lowerCAmelCase_ ) -> bool:
_a : List[str] = 0
for ch in input_str:
_a : Optional[Any] = ord(lowerCAmelCase_ )
_a : Tuple = pow(2 , lowerCAmelCase_ )
        # If the bit for this character's code point is already set, it is a repeat
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
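    # A standalone restatement of the bitmap idea (assumed equivalent to the
    # function above): one arbitrary-precision int acts as a bitset over Unicode
    # code points, so a repeated character finds its bit already set.
    def has_unique_chars(s: str) -> bool:
        seen = 0
        for ch in s:
            bit = 1 << ord(ch)
            if seen & bit:
                return False
            seen |= bit
        return True

    assert has_unique_chars('abcdef')
    assert not has_unique_chars('hello')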
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
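# Rough sketch of the deferral trick _LazyModule relies on (simplified; the real
# implementation lives in transformers.utils): attribute access triggers the actual
# submodule import, so importing this package stays cheap until a name is used.
def _lazy_getattr_sketch(package_name, import_structure, attr):
    import importlib

    name_to_module = {
        name: module for module, names in import_structure.items() for name in names
    }
    submodule = importlib.import_module('.' + name_to_module[attr], package_name)
    return getattr(submodule, attr)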
| 89 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : str = 'upernet'
def __init__( self : List[Any] ,_UpperCAmelCase : str=None ,_UpperCAmelCase : List[Any]=512 ,_UpperCAmelCase : int=0.02 ,_UpperCAmelCase : int=[1, 2, 3, 6] ,_UpperCAmelCase : int=True ,_UpperCAmelCase : Dict=0.4 ,_UpperCAmelCase : Optional[int]=384 ,_UpperCAmelCase : Optional[Any]=256 ,_UpperCAmelCase : List[Any]=1 ,_UpperCAmelCase : List[Any]=False ,_UpperCAmelCase : Tuple=255 ,**_UpperCAmelCase : int ,):
super().__init__(**_UpperCAmelCase )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_a : List[str] = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : List[Any] = backbone_config.get('model_type' )
_a : List[str] = CONFIG_MAPPING[backbone_model_type]
_a : Optional[int] = config_class.from_dict(_UpperCAmelCase )
_a : Optional[int] = backbone_config
_a : Union[str, Any] = hidden_size
_a : str = initializer_range
_a : Any = pool_scales
_a : str = use_auxiliary_head
_a : Tuple = auxiliary_loss_weight
_a : Optional[Any] = auxiliary_in_channels
_a : Union[str, Any] = auxiliary_channels
_a : List[str] = auxiliary_num_convs
_a : List[str] = auxiliary_concat_input
_a : Any = loss_ignore_index
def __lowercase ( self : Optional[int] ):
_a : int = copy.deepcopy(self.__dict__ )
_a : List[Any] = self.backbone_config.to_dict()
_a : Dict = self.__class__.model_type
return output
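# Hedged construction example: `backbone_config` may also be given as a plain dict
# carrying a `model_type` key, which the constructor above resolves through
# CONFIG_MAPPING. (The class is assumed to be registered as UperNetConfig.)
def _upernet_config_example():
    from transformers import UperNetConfig

    config = UperNetConfig(
        backbone_config={'model_type': 'resnet', 'out_features': ['stage1', 'stage2', 'stage3', 'stage4']},
        hidden_size=512,
    )
    assert config.to_dict()['backbone_config']['model_type'] == 'resnet'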
| 89 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1024 , lowerCAmelCase_=1024 , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> List[Any]:
_a : str = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
_a : List[Any] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='train' , **lowerCAmelCase_ )
_a : List[str] = tok.pad_token_id
def get_lens(lowerCAmelCase_ ):
_a : Dict = tqdm(
DataLoader(lowerCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=lowerCAmelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_a : Union[str, Any] = []
for batch in dl:
_a : Optional[Any] = batch['input_ids'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
_a : Optional[Any] = batch['labels'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
max_lens.append(max(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
max_lens.extend(lowerCAmelCase_ )
return max_lens
_a : str = get_lens(lowerCAmelCase_ )
_a : Optional[int] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='val' , **lowerCAmelCase_ )
_a : Dict = get_lens(lowerCAmelCase_ )
pickle_save(lowerCAmelCase_ , train_ds.len_file )
pickle_save(lowerCAmelCase_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
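# The core length computation above, isolated with a fake batch (a sketch; the real
# pad_token_id comes from the tokenizer): count the non-pad tokens per row, which is
# what feeds the cached length files used for dynamic batching.
def _non_pad_lengths_example():
    import torch

    pad_token_id = 0  # illustrative value
    input_ids = torch.tensor([[5, 6, 7, 0, 0], [5, 6, 0, 0, 0]])
    assert input_ids.ne(pad_token_id).sum(1).tolist() == [3, 2]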
| 89 | 1 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[list[int]]:
_a : list[list[int]] = []
_a : list[int] = []
_a : List[str] = 0
_a : List[str] = sum(lowerCAmelCase_ )
create_state_space_tree(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return result
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> None:
if sum(lowerCAmelCase_ ) > max_sum or (remaining_nums_sum + sum(lowerCAmelCase_ )) < max_sum:
return
if sum(lowerCAmelCase_ ) == max_sum:
result.append(lowerCAmelCase_ )
return
for index in range(lowerCAmelCase_ , len(lowerCAmelCase_ ) ):
create_state_space_tree(
lowerCAmelCase_ , lowerCAmelCase_ , index + 1 , [*path, nums[index]] , lowerCAmelCase_ , remaining_nums_sum - nums[index] , )
__lowerCAmelCase = [3, 34, 4, 12, 5, 2]
__lowerCAmelCase = 9
__lowerCAmelCase = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 89 |
'''simple docstring'''
from typing import Any
class __magic_name__ :
def __init__( self : List[Any] ,_UpperCAmelCase : Any ):
_a : List[Any] = data
_a : Union[str, Any] = None
def __repr__( self : Any ):
return F"""Node({self.data})"""
class __magic_name__ :
def __init__( self : int ):
_a : Tuple = None
def __iter__( self : str ):
_a : int = self.head
while node:
yield node.data
_a : Union[str, Any] = node.next
def __len__( self : Optional[Any] ):
return sum(1 for _ in self )
def __repr__( self : str ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __getitem__( self : Tuple ,_UpperCAmelCase : int ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Union[str, Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
_a : Any = self.head
for _ in range(_UpperCAmelCase ):
_a : Optional[Any] = current.next
_a : Optional[int] = data
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Any ):
self.insert_nth(len(self ) ,_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : Any ):
self.insert_nth(0 ,_UpperCAmelCase )
def __lowercase ( self : str ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
_a : int = Node(_UpperCAmelCase )
if self.head is None:
_a : str = new_node
elif index == 0:
_a : List[str] = self.head # link new_node to head
_a : Union[str, Any] = new_node
else:
_a : int = self.head
for _ in range(index - 1 ):
_a : Union[str, Any] = temp.next
_a : List[str] = temp.next
_a : Optional[int] = new_node
def __lowercase ( self : Optional[int] ): # print every node data
print(self )
def __lowercase ( self : str ):
return self.delete_nth(0 )
def __lowercase ( self : str ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def __lowercase ( self : List[str] ,_UpperCAmelCase : int = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
_a : Optional[Any] = self.head # default first node
if index == 0:
_a : int = self.head.next
else:
_a : int = self.head
for _ in range(index - 1 ):
_a : str = temp.next
_a : str = temp.next
_a : int = temp.next.next
return delete_node.data
def __lowercase ( self : List[Any] ):
return self.head is None
def __lowercase ( self : Tuple ):
_a : List[Any] = None
_a : Tuple = self.head
while current:
# Store the current node's next node.
_a : Dict = current.next
# Make the current node's next point backwards
_a : str = prev
# Make the previous node be the current node
_a : Tuple = current
# Make the current node the next node (to progress iteration)
_a : Optional[Any] = next_node
# Return prev in order to put the head at the end
_a : int = prev
def __lowerCamelCase ( ) -> None:
_a : List[str] = LinkedList()
assert linked_list.is_empty() is True
assert str(lowerCAmelCase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(lowerCAmelCase_ ) == i
linked_list.insert_nth(lowerCAmelCase_ , i + 1 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(lowerCAmelCase_ ) == 9
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
_a : Union[str, Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(-8 , 1 ) )
def __lowerCamelCase ( ) -> None:
_a : Dict = [
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
-192.55_555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
_a : List[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(lowerCAmelCase_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(lowerCAmelCase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
_a : List[str] = linked_list.delete_head()
assert result == -9
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
_a : Dict = linked_list.delete_tail()
assert result == 12.2
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
_a : Optional[Any] = linked_list.delete_nth(10 )
assert result is None
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(lowerCAmelCase_ )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(lowerCAmelCase_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __lowerCamelCase ( ) -> Union[str, Any]:
from doctest import testmod
testmod()
_a : Optional[int] = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(lowerCAmelCase_ )
print('\nReading/changing Node data using indexing:' )
print(f"""Element at Position 1: {linked_list[1]}""" )
_a : Optional[Any] = input('Enter New Value: ' ).strip()
print('New list:' )
print(lowerCAmelCase_ )
print(f"""length of linked_list is : {len(lowerCAmelCase_ )}""" )
if __name__ == "__main__":
main()
| 89 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : List[str] = 'open-llama'
def __init__( self : List[str] ,_UpperCAmelCase : str=100000 ,_UpperCAmelCase : Optional[int]=4096 ,_UpperCAmelCase : Dict=11008 ,_UpperCAmelCase : Dict=32 ,_UpperCAmelCase : Dict=32 ,_UpperCAmelCase : int="silu" ,_UpperCAmelCase : Tuple=2048 ,_UpperCAmelCase : Optional[int]=0.02 ,_UpperCAmelCase : List[Any]=1E-6 ,_UpperCAmelCase : int=True ,_UpperCAmelCase : int=0 ,_UpperCAmelCase : List[str]=1 ,_UpperCAmelCase : int=2 ,_UpperCAmelCase : List[str]=False ,_UpperCAmelCase : List[str]=True ,_UpperCAmelCase : Dict=0.1 ,_UpperCAmelCase : Any=0.1 ,_UpperCAmelCase : Optional[Any]=True ,_UpperCAmelCase : Dict=True ,_UpperCAmelCase : Any=None ,**_UpperCAmelCase : Union[str, Any] ,):
_a : str = vocab_size
_a : Dict = max_position_embeddings
_a : Tuple = hidden_size
_a : Any = intermediate_size
_a : Optional[int] = num_hidden_layers
_a : Optional[Any] = num_attention_heads
_a : Union[str, Any] = hidden_act
_a : List[str] = initializer_range
_a : List[str] = rms_norm_eps
_a : str = use_cache
_a : Tuple = kwargs.pop(
'use_memorry_efficient_attention' ,_UpperCAmelCase )
_a : Any = hidden_dropout_prob
_a : str = attention_dropout_prob
_a : List[str] = use_stable_embedding
_a : Tuple = shared_input_output_embedding
_a : List[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase ,bos_token_id=_UpperCAmelCase ,eos_token_id=_UpperCAmelCase ,tie_word_embeddings=_UpperCAmelCase ,**_UpperCAmelCase ,)
def __lowercase ( self : Optional[int] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,_UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F"""got {self.rope_scaling}""" )
_a : Optional[Any] = self.rope_scaling.get('type' ,_UpperCAmelCase )
_a : List[str] = self.rope_scaling.get('factor' ,_UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
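# Hedged examples of the validation above (the class is assumed to be OpenLlamaConfig
# in transformers): a well-formed dict passes, while a factor <= 1 raises.
def _rope_scaling_examples():
    from transformers import OpenLlamaConfig

    OpenLlamaConfig(rope_scaling={'type': 'linear', 'factor': 2.0})  # valid
    try:
        OpenLlamaConfig(rope_scaling={'type': 'linear', 'factor': 0.5})
    except ValueError as err:
        print(err)  # the factor field must be a float > 1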
| 89 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : str ,_UpperCAmelCase : Path ,_UpperCAmelCase : Union[str, None] = None ,_UpperCAmelCase : Union[List[str], None] = None ,_UpperCAmelCase : Union[str, List[str], None] = None ,_UpperCAmelCase : bool = True ,):
_a : Dict = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )]
if identifier is not None:
_a : str = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
for n_ in n_identifier:
_a : int = [file for file in files if n_ not in file]
else:
_a : Optional[Any] = [file for file in files if n_identifier not in file]
_a : Dict = ignore_files or []
ignore_files.append('__init__.py' )
_a : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' ,_UpperCAmelCase )
if only_modules:
_a : Any = file.split('.' )[0]
try:
_a : Optional[int] = getattr(_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict = doctest.DocTestSuite(_UpperCAmelCase )
_a : Optional[int] = unittest.TextTestRunner().run(_UpperCAmelCase )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
_a : str = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def __lowercase ( self : Union[str, Any] ):
_a : Optional[Any] = Path('src/transformers' )
_a : Optional[Any] = 'modeling'
_a : Union[str, Any] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase ,ignore_files=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : str = Path('src/transformers' )
_a : List[str] = 'tokenization'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Any = Path('src/transformers' )
_a : str = 'configuration'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : Tuple = Path('src/transformers' )
_a : Optional[int] = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(_UpperCAmelCase ,n_identifier=_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
_a : Union[str, Any] = Path('docs/source' )
_a : List[str] = ['favicon.ico']
self.analyze_directory(_UpperCAmelCase ,ignore_files=_UpperCAmelCase ,only_modules=_UpperCAmelCase )
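# Standalone illustration of the `only_modules` branch above (a sketch, not one of
# the original tests): build a DocTestSuite for a module object and run it with
# unittest's text runner, then check that nothing failed.
def _doctest_suite_example():
    import types

    module = types.ModuleType('demo')
    exec(
        'def add(a, b):\n'
        '    """\n'
        '    >>> add(2, 3)\n'
        '    5\n'
        '    """\n'
        '    return a + b\n',
        module.__dict__,
    )
    suite = doctest.DocTestSuite(module)
    result = unittest.TextTestRunner(verbosity=0).run(suite)
    assert len(result.failures) == 0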
| 89 | 1 |
'''simple docstring'''
import requests
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
_a : Tuple = {'Content-Type': 'application/json'}
_a : Tuple = requests.post(lowerCAmelCase_ , json={'text': message_body} , headers=lowerCAmelCase_ )
if response.status_code != 200:
_a : Optional[int] = (
'Request to slack returned an error '
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(lowerCAmelCase_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 89 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
__lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[Any]:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_a : List[Any] = model_type_to_module_name(lowerCAmelCase_ )
_a : Optional[Any] = importlib.import_module(f""".{module_name}""" , 'transformers.models' )
try:
return getattr(lowerCAmelCase_ , lowerCAmelCase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowerCAmelCase_ , '__name__' , lowerCAmelCase_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_a : Dict = importlib.import_module('transformers' )
if hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
return getattr(lowerCAmelCase_ , lowerCAmelCase_ )
return None
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , **lowerCAmelCase_ , ) -> Tuple:
_a : List[str] = get_file_from_repo(
lowerCAmelCase_ , lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(lowerCAmelCase_ , encoding='utf-8' ) as reader:
return json.load(lowerCAmelCase_ )
class __magic_name__ :
def __init__( self : List[str] ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(_UpperCAmelCase )
def __lowercase ( cls : Dict ,_UpperCAmelCase : Union[str, Any] ,**_UpperCAmelCase : Optional[Any] ):
_a : Any = kwargs.pop('config' ,_UpperCAmelCase )
_a : Dict = kwargs.pop('trust_remote_code' ,_UpperCAmelCase )
_a : Any = True
_a , _a : Tuple = ImageProcessingMixin.get_image_processor_dict(_UpperCAmelCase ,**_UpperCAmelCase )
_a : List[Any] = config_dict.get('image_processor_type' ,_UpperCAmelCase )
_a : int = None
if "AutoImageProcessor" in config_dict.get('auto_map' ,{} ):
_a : Any = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_a : List[Any] = config_dict.pop('feature_extractor_type' ,_UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
_a : Optional[int] = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ):
_a : List[Any] = config_dict['auto_map']['AutoFeatureExtractor']
_a : List[str] = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Dict = AutoConfig.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
# It could be in `config.image_processor_type``
_a : Optional[int] = getattr(_UpperCAmelCase ,'image_processor_type' ,_UpperCAmelCase )
if hasattr(_UpperCAmelCase ,'auto_map' ) and "AutoImageProcessor" in config.auto_map:
_a : Union[str, Any] = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
_a : Optional[int] = image_processor_class_from_name(_UpperCAmelCase )
_a : List[str] = image_processor_auto_map is not None
_a : Optional[int] = image_processor_class is not None or type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
_a : Optional[int] = resolve_trust_remote_code(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
if has_remote_code and trust_remote_code:
_a : Dict = get_class_from_dynamic_module(
_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase )
_a : int = kwargs.pop('code_revision' ,_UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
_a : Dict = IMAGE_PROCESSOR_MAPPING[type(_UpperCAmelCase )]
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def __lowercase ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Dict ):
IMAGE_PROCESSOR_MAPPING.register(_UpperCAmelCase ,_UpperCAmelCase )
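# Hedged usage sketch of the resolution order implemented above (the class is assumed
# to be exported as AutoImageProcessor; the checkpoint name is illustrative):
# `from_pretrained` resolves the concrete processor class via the model config, and
# `register` adds custom (config, processor) pairs to IMAGE_PROCESSOR_MAPPING.
def _auto_image_processor_example():
    from transformers import AutoImageProcessor

    # resolves to ViTImageProcessor through the mapping defined above
    return AutoImageProcessor.from_pretrained('google/vit-base-patch16-224')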
| 89 | 1 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
if nth_term == "":
return [""]
_a : Optional[int] = int(lowerCAmelCase_ )
_a : int = int(lowerCAmelCase_ )
_a : list[str] = []
for temp in range(int(lowerCAmelCase_ ) ):
series.append(f"""1 / {pow(temp + 1 , int(lowerCAmelCase_ ) )}""" if series else '1' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase = int(input('''Enter the last number (nth term) of the P-Series'''))
__lowerCAmelCase = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
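    # Worked check (calling the function by the name used above): with nth_term=3 and
    # power=2 the terms are 1, 1/2^2, 1/3^2; the p-series converges exactly when the
    # power is greater than 1.
    assert p_series(3, 2) == ['1', '1 / 4', '1 / 9']
    assert p_series('', 2) == ['']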
| 89 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowerCAmelCase = None
__lowerCAmelCase = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowerCAmelCase = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class __magic_name__ :
lowerCAmelCase : bool = True
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "PIL.Image.Image"
lowerCAmelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase : str = field(default='Image' , init=_UpperCamelCase , repr=_UpperCamelCase )
def __call__( self : Union[str, Any] ):
return self.pa_type
def __lowercase ( self : Any ,_UpperCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Optional[Any] = np.array(_UpperCAmelCase )
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(_UpperCAmelCase ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_UpperCAmelCase )
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
            # we set "bytes": None to avoid duplicating data that is already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : dict ,_UpperCAmelCase : Optional[int]=None ):
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.' )
if token_per_repo_id is None:
_a : Dict = {}
_a , _a : str = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(_UpperCAmelCase ):
_a : Any = PIL.Image.open(_UpperCAmelCase )
else:
_a : List[Any] = path.split('::' )[-1]
try:
_a : str = string_to_dict(_UpperCAmelCase ,config.HUB_DATASETS_URL )['repo_id']
_a : Optional[Any] = token_per_repo_id.get(_UpperCAmelCase )
except ValueError:
_a : int = None
with xopen(_UpperCAmelCase ,'rb' ,use_auth_token=_UpperCAmelCase ) as f:
_a : Tuple = BytesIO(f.read() )
_a : Union[str, Any] = PIL.Image.open(bytes_ )
else:
_a : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __lowercase ( self : int ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def __lowercase ( self : str ,_UpperCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
_a : Union[str, Any] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
_a : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_a : List[str] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Any = pa.StructArray.from_arrays([storage, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
_a : Union[str, Any] = storage.field('bytes' )
else:
_a : Tuple = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
_a : Union[str, Any] = storage.field('path' )
else:
_a : Dict = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
_a : List[str] = pa.array(
[encode_np_array(np.array(_UpperCAmelCase ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
_a : int = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Optional[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase ,self.pa_type )
def __lowercase ( self : Dict ,_UpperCAmelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(_UpperCAmelCase : Tuple ):
with xopen(_UpperCAmelCase ,'rb' ) as f:
_a : int = f.read()
return bytes_
_a : Any = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
_a : Optional[Any] = pa.array(
[os.path.basename(_UpperCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] ,type=pa.string() ,)
_a : Dict = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase ,self.pa_type )
def __lowerCamelCase ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_a : Dict = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __lowerCamelCase ( lowerCAmelCase_ ) -> bytes:
_a : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
_a : Optional[Any] = image.format
else:
_a : str = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(lowerCAmelCase_ , format=lowerCAmelCase_ )
return buffer.getvalue()
def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
if hasattr(lowerCAmelCase_ , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
_a : List[Any] = array.dtype
_a : Optional[int] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
_a : Union[str, Any] = dtype.kind
_a : Union[str, Any] = dtype.itemsize
_a : List[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
_a : Optional[int] = np.dtype('|u1' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
_a : Union[str, Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
_a : str = dtype_byteorder + dtype_kind + str(lowerCAmelCase_ )
_a : List[Any] = np.dtype(lowerCAmelCase_ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
_a : Union[str, Any] = PIL.Image.fromarray(array.astype(lowerCAmelCase_ ) )
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def __lowerCamelCase ( lowerCAmelCase_ ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if objs:
_a , _a : Optional[Any] = first_non_null_value(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowerCAmelCase_ , np.ndarray ):
_a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
elif isinstance(lowerCAmelCase_ , PIL.Image.Image ):
_a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
else:
return objs
else:
return objs
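# Hedged sketch of how the encode path above is reached from user code (requires
# Pillow; names assume the public `datasets` API): casting a column to the Image
# feature stores {"bytes", "path"} structs in Arrow and decodes back to PIL on read.
def _image_feature_example():
    import numpy as np
    from datasets import Dataset, Image

    ds = Dataset.from_dict({'image': [np.zeros((4, 4, 3), dtype=np.uint8)]})
    ds = ds.cast_column('image', Image())  # goes through the np.ndarray encode path
    return ds[0]['image']  # decoded back to a PIL.Image.Image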
| 89 | 1 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = BarthezTokenizer
lowerCAmelCase : int = BarthezTokenizerFast
lowerCAmelCase : Dict = True
lowerCAmelCase : str = True
def __lowercase ( self : List[Any] ):
super().setUp()
_a : List[Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname ,legacy_format=_UpperCAmelCase )
_a : Union[str, Any] = tokenizer
def __lowercase ( self : Tuple ):
_a : Optional[Any] = '<pad>'
_a : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) ,_UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) ,_UpperCAmelCase )
def __lowercase ( self : str ):
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'<mask>' )
self.assertEqual(len(_UpperCAmelCase ) ,101122 )
def __lowercase ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size ,101122 )
@require_torch
def __lowercase ( self : Dict ):
_a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : Dict = [0, 57, 3018, 70307, 91, 2]
_a : Dict = self.tokenizer(
_UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual((2, 6) ,batch.input_ids.shape )
self.assertEqual((2, 6) ,batch.attention_mask.shape )
_a : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
_a : str = self.get_tokenizer()
_a : List[str] = self.get_rust_tokenizer()
_a : Dict = 'I was born in 92000, and this is falsé.'
_a : List[Any] = tokenizer.tokenize(_UpperCAmelCase )
_a : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Union[str, Any] = self.get_rust_tokenizer()
_a : Any = tokenizer.encode(_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
@slow
def __lowercase ( self : Optional[int] ):
# fmt: off
_a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
_a : Optional[Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
| 89 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str | Literal[False]:
_a : Optional[int] = list(lowerCAmelCase_ )
_a : Optional[Any] = list(lowerCAmelCase_ )
_a : Union[str, Any] = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count += 1
_a : Optional[int] = '_'
if count > 1:
return False
else:
return "".join(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ ) -> list[str]:
_a : Optional[int] = []
while True:
_a : Any = ['$'] * len(lowerCAmelCase_ )
_a : List[str] = []
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
_a : Optional[int] = compare_string(binary[i] , binary[j] )
if k is False:
_a : Optional[Any] = '*'
_a : Optional[Any] = '*'
temp.append('X' )
for i in range(len(lowerCAmelCase_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(lowerCAmelCase_ ) == 0:
return pi
_a : Any = list(set(lowerCAmelCase_ ) )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : int = []
for minterm in minterms:
_a : Optional[int] = ''
for _ in range(lowerCAmelCase_ ):
_a : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(lowerCAmelCase_ )
return temp
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> bool:
_a : int = list(lowerCAmelCase_ )
_a : Union[str, Any] = list(lowerCAmelCase_ )
_a : str = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : List[Any] = []
_a : Optional[Any] = [0] * len(lowerCAmelCase_ )
for i in range(len(chart[0] ) ):
_a : Union[str, Any] = 0
_a : int = -1
for j in range(len(lowerCAmelCase_ ) ):
if chart[j][i] == 1:
count += 1
_a : int = j
if count == 1:
_a : List[Any] = 1
for i in range(len(lowerCAmelCase_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(lowerCAmelCase_ ) ):
_a : Any = 0
temp.append(prime_implicants[i] )
while True:
_a : Union[str, Any] = 0
_a : List[Any] = -1
_a : str = 0
for i in range(len(lowerCAmelCase_ ) ):
_a : Union[str, Any] = chart[i].count(1 )
if count_n > max_n:
_a : Any = count_n
_a : int = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(lowerCAmelCase_ ) ):
_a : List[str] = 0
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[list[int]]:
_a : int = [[0 for x in range(len(lowerCAmelCase_ ) )] for x in range(len(lowerCAmelCase_ ) )]
for i in range(len(lowerCAmelCase_ ) ):
_a : str = prime_implicants[i].count('_' )
for j in range(len(lowerCAmelCase_ ) ):
if is_for_table(prime_implicants[i] , binary[j] , lowerCAmelCase_ ):
_a : Optional[Any] = 1
return chart
def __lowerCamelCase ( ) -> None:
_a : Optional[int] = int(input('Enter the no. of variables\n' ) )
_a : List[Any] = [
float(lowerCAmelCase_ )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_a : List[str] = decimal_to_binary(lowerCAmelCase_ , lowerCAmelCase_ )
_a : Dict = check(lowerCAmelCase_ )
print('Prime Implicants are:' )
print(lowerCAmelCase_ )
_a : List[Any] = prime_implicant_chart(lowerCAmelCase_ , lowerCAmelCase_ )
_a : int = selection(lowerCAmelCase_ , lowerCAmelCase_ )
print('Essential Prime Implicants are:' )
print(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
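    # Non-interactive worked example (names follow the calls inside main above):
    # three variables, minterms {0, 1, 2, 5}.
    demo_binary = decimal_to_binary(3, [0, 1, 2, 5])
    assert demo_binary == ['000', '001', '010', '101']
    demo_pi = check(demo_binary)
    demo_chart = prime_implicant_chart(demo_pi, demo_binary)
    print('Essential prime implicants:', selection(demo_chart, demo_pi))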
| 89 | 1 |
'''simple docstring'''
import math
def __lowerCamelCase ( lowerCAmelCase_ ) -> bool:
_a : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ = 1 / 12345 ) -> int:
_a : int = 0
_a : Optional[Any] = 0
_a : int = 3
while True:
_a : Tuple = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(lowerCAmelCase_ ):
_a : Union[str, Any] = int(lowerCAmelCase_ )
total_partitions += 1
if check_partition_perfect(lowerCAmelCase_ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(lowerCAmelCase_ )
integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 89 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 1 |
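The `_LazyModule` call above wires `_import_structure` so that heavy submodules load only when first touched. A self-contained miniature of the same idea using a module-level `__getattr__` (PEP 562); the structure dict below is illustrative and stands in for the real transformers machinery:

import importlib

# Hypothetical miniature of the lazy-import pattern; maps providing module -> exported names.
_IMPORT_STRUCTURE = {"json": ["dumps", "loads"]}
_NAME_TO_MODULE = {name: module for module, names in _IMPORT_STRUCTURE.items() for name in names}

def __getattr__(name: str):
    # Resolve the providing module only on first access, then cache the attribute.
    if name in _NAME_TO_MODULE:
        module = importlib.import_module(_NAME_TO_MODULE[name])
        value = getattr(module, name)
        globals()[name] = value
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")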
'''simple docstring'''
def __lowerCamelCase ( lowerCAmelCase_ ) -> str:
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
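The snippet above runs `doctest.testmod()` although its docstring was stripped; a variant with the doctest restored might read as follows (the example sentences are illustrative):

def reverse_words(input_str: str) -> str:
    """
    Reverse the word order of a sentence while keeping each word intact.

    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("  extra   whitespace  collapses  ")
    'collapses whitespace extra'
    """
    return " ".join(input_str.split()[::-1])

if __name__ == "__main__":
    import doctest
    doctest.testmod()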
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = LayoutLMTokenizer
lowerCAmelCase : Tuple = LayoutLMTokenizerFast
lowerCAmelCase : List[Any] = True
lowerCAmelCase : int = True
def __lowercase ( self : Dict ):
super().setUp()
_a : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_a : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase ( self : Dict ,**_UpperCAmelCase : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Tuple ):
_a : Optional[int] = 'UNwant\u00E9d,running'
_a : List[Any] = 'unwanted, running'
return input_text, output_text
def __lowercase ( self : Optional[int] ):
_a : Optional[Any] = self.tokenizer_class(self.vocab_file )
_a : Optional[Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) ,[7, 4, 5, 10, 8, 9] )
def __lowercase ( self : Optional[int] ):
pass
| 89 | 1 |
'''simple docstring'''
import cmath
import math
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> complex:
_a : Optional[Any] = math.radians(lowerCAmelCase_ )
_a : List[Any] = math.radians(lowerCAmelCase_ )
# Convert voltage and current to rectangular form
_a : Optional[Any] = cmath.rect(lowerCAmelCase_ , lowerCAmelCase_ )
_a : Tuple = cmath.rect(lowerCAmelCase_ , lowerCAmelCase_ )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
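A worked use of the polar-to-rectangular product above, assuming the function is named `apparent_power(voltage, current, voltage_angle, current_angle)` and, as the `math.radians` calls imply, takes its angles in degrees. The sketch mirrors the code as written, which multiplies V by I rather than by the conjugate of I:

import cmath
import math

def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Both phasors converted from polar (magnitude, degrees) to rectangular form, then multiplied.
    v = cmath.rect(voltage, math.radians(voltage_angle))
    i = cmath.rect(current, math.radians(current_angle))
    return v * i

if __name__ == "__main__":
    # Aligned angles: purely real apparent power.
    assert cmath.isclose(apparent_power(100, 5, 0, 0), 500 + 0j)
    # A 90-degree shift on the current moves the product onto the imaginary axis.
    assert cmath.isclose(apparent_power(100, 5, 0, 90), 500j, abs_tol=1e-9)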
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Any = 'conditional_detr'
lowerCAmelCase : List[str] = ['past_key_values']
lowerCAmelCase : Optional[int] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[int] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[Any]=3 ,_UpperCAmelCase : List[Any]=300 ,_UpperCAmelCase : Dict=6 ,_UpperCAmelCase : List[str]=2048 ,_UpperCAmelCase : Optional[int]=8 ,_UpperCAmelCase : List[Any]=6 ,_UpperCAmelCase : Optional[int]=2048 ,_UpperCAmelCase : Dict=8 ,_UpperCAmelCase : int=0.0 ,_UpperCAmelCase : Optional[Any]=0.0 ,_UpperCAmelCase : Optional[Any]=True ,_UpperCAmelCase : str="relu" ,_UpperCAmelCase : Tuple=256 ,_UpperCAmelCase : Optional[int]=0.1 ,_UpperCAmelCase : str=0.0 ,_UpperCAmelCase : Optional[int]=0.0 ,_UpperCAmelCase : Union[str, Any]=0.02 ,_UpperCAmelCase : List[str]=1.0 ,_UpperCAmelCase : Any=False ,_UpperCAmelCase : int="sine" ,_UpperCAmelCase : List[str]="resnet50" ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : str=False ,_UpperCAmelCase : str=2 ,_UpperCAmelCase : int=5 ,_UpperCAmelCase : Optional[int]=2 ,_UpperCAmelCase : str=1 ,_UpperCAmelCase : Union[str, Any]=1 ,_UpperCAmelCase : List[str]=2 ,_UpperCAmelCase : Union[str, Any]=5 ,_UpperCAmelCase : List[Any]=2 ,_UpperCAmelCase : Optional[int]=0.25 ,**_UpperCAmelCase : Tuple ,):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_a : Optional[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : str = backbone_config.get('model_type' )
_a : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
_a : List[Any] = config_class.from_dict(_UpperCAmelCase )
_a : Tuple = use_timm_backbone
_a : Union[str, Any] = backbone_config
_a : List[Any] = num_channels
_a : Union[str, Any] = num_queries
_a : Optional[Any] = d_model
_a : Tuple = encoder_ffn_dim
_a : Dict = encoder_layers
_a : List[str] = encoder_attention_heads
_a : Union[str, Any] = decoder_ffn_dim
_a : Optional[int] = decoder_layers
_a : int = decoder_attention_heads
_a : Optional[int] = dropout
_a : Tuple = attention_dropout
_a : List[Any] = activation_dropout
_a : str = activation_function
_a : Optional[Any] = init_std
_a : Union[str, Any] = init_xavier_std
_a : List[Any] = encoder_layerdrop
_a : List[Any] = decoder_layerdrop
_a : Dict = encoder_layers
_a : List[Any] = auxiliary_loss
_a : Optional[int] = position_embedding_type
_a : List[Any] = backbone
_a : Optional[int] = use_pretrained_backbone
_a : Optional[int] = dilation
# Hungarian matcher
_a : Tuple = class_cost
_a : str = bbox_cost
_a : Any = giou_cost
# Loss coefficients
_a : Tuple = mask_loss_coefficient
_a : Dict = dice_loss_coefficient
_a : Tuple = cls_loss_coefficient
_a : Any = bbox_loss_coefficient
_a : Dict = giou_loss_coefficient
_a : Union[str, Any] = focal_alpha
super().__init__(is_encoder_decoder=_UpperCAmelCase ,**_UpperCAmelCase )
@property
def __lowercase ( self : Dict ):
return self.encoder_attention_heads
@property
def __lowercase ( self : str ):
return self.d_model
def __lowercase ( self : int ):
_a : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_a : Dict = self.backbone_config.to_dict()
_a : Union[str, Any] = self.__class__.model_type
return output
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : str = version.parse('1.11' )
@property
def __lowercase ( self : Dict ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def __lowercase ( self : Any ):
return 1E-5
@property
def __lowercase ( self : List[Any] ):
return 12
| 89 | 1 |
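A small sketch of how the `attribute_map` above is exercised: the generic names resolve onto the model-specific fields, so `hidden_size` reads `d_model` and `num_attention_heads` reads `encoder_attention_heads`. This assumes `transformers` is installed and exports the class as `ConditionalDetrConfig`:

from transformers import ConditionalDetrConfig

config = ConditionalDetrConfig(d_model=128, encoder_attention_heads=4)
# attribute_map routes the generic names onto the model-specific fields.
assert config.hidden_size == 128
assert config.num_attention_heads == 4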
'''simple docstring'''
def __lowerCamelCase ( lowerCAmelCase_ ) -> list:
for i in range(len(lowerCAmelCase_ ) - 1 , 0 , -1 ):
_a : List[Any] = False
for j in range(lowerCAmelCase_ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
_a , _a : Tuple = unsorted[j - 1], unsorted[j]
_a : Any = True
for j in range(lowerCAmelCase_ ):
if unsorted[j] > unsorted[j + 1]:
_a , _a : Any = unsorted[j + 1], unsorted[j]
_a : Tuple = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 89 |
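For reference, a readable rendering of the bidirectional pass above with descriptive names restored; the backward sweep pulls minima toward the front and the forward sweep pushes maxima toward the back:

def cocktail_shaker_sort(unsorted: list) -> list:
    # In-place bidirectional bubble sort; returns the list for convenience.
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):  # backward pass
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):  # forward pass
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted

if __name__ == "__main__":
    assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
    assert cocktail_shaker_sort([-4, 0, 3]) == [-4, 0, 3]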
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ :
def __init__( self : List[str] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=13 ,_UpperCAmelCase : Any=32 ,_UpperCAmelCase : Union[str, Any]=3 ,_UpperCAmelCase : Optional[int]=4 ,_UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] ,_UpperCAmelCase : Tuple=[2, 2, 3, 2] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=37 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[Any]=10 ,_UpperCAmelCase : Tuple=0.02 ,_UpperCAmelCase : Any=["stage2", "stage3", "stage4"] ,_UpperCAmelCase : Any=[2, 3, 4] ,_UpperCAmelCase : Tuple=None ,):
_a : Optional[Any] = parent
_a : List[Any] = batch_size
_a : str = image_size
_a : Union[str, Any] = num_channels
_a : List[Any] = num_stages
_a : Dict = hidden_sizes
_a : int = depths
_a : Tuple = is_training
_a : List[str] = use_labels
_a : Dict = intermediate_size
_a : int = hidden_act
_a : int = num_labels
_a : Any = initializer_range
_a : Tuple = out_features
_a : int = out_indices
_a : List[Any] = scope
def __lowercase ( self : Dict ):
_a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Union[str, Any] = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.num_labels )
_a : str = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Any ):
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[Any] = ConvNextVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Any = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ):
_a : List[Any] = ConvNextVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase ,labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[int] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Dict = model(_UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_a : Tuple = None
_a : List[Any] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __lowercase ( self : Optional[Any] ):
_a : Any = self.prepare_config_and_inputs()
_a , _a , _a : Union[str, Any] = config_and_inputs
_a : Any = {'pixel_values': pixel_values}
return config, inputs_dict
def __lowercase ( self : str ):
_a : Tuple = self.prepare_config_and_inputs()
_a , _a , _a : Tuple = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase : str = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : int = False
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[str] = False
lowerCAmelCase : Optional[int] = False
def __lowercase ( self : List[Any] ):
_a : str = ConvNextVaModelTester(self )
_a : Tuple = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 )
def __lowercase ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : str ):
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __lowercase ( self : List[Any] ):
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __lowercase ( self : Any ):
pass
def __lowercase ( self : List[str] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Any = True
if model_class.__name__ in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]:
continue
_a : Optional[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_a : str = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : Optional[int] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : str ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Optional[int] = False
_a : Tuple = True
if (
model_class.__name__
in [*get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
_a : Tuple = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
_a : Any = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : List[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : List[Any] ):
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = model_class(_UpperCAmelCase )
_a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Dict = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : Any ):
def check_hidden_states_output(_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ):
_a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : str = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Optional[Any] = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : List[Any] ):
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def __lowercase ( self : int ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = ConvNextVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowerCamelCase ( ) -> List[Any]:
_a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Optional[Any] ):
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def __lowercase ( self : Any ):
_a : List[str] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_UpperCAmelCase )
_a : Optional[int] = self.default_image_processor
_a : str = prepare_img()
_a : str = preprocessor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_a : Dict = model(**_UpperCAmelCase )
# verify the logits
_a : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_UpperCAmelCase )
_a : Optional[Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
| 89 | 1 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__lowerCAmelCase = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
__lowerCAmelCase = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False ) -> Any:
_a , _a : Union[str, Any] = create_model(
'HTSAT-tiny' , 'roberta' , lowerCAmelCase_ , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=lowerCAmelCase_ , fusion_type='aff_2d' if enable_fusion else None , )
return model, model_cfg
def __lowerCamelCase ( lowerCAmelCase_ ) -> int:
_a : Optional[int] = {}
_a : Tuple = r'.*sequential.(\d+).*'
_a : int = r'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_a : Union[str, Any] = key.replace(lowerCAmelCase_ , lowerCAmelCase_ )
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
# replace sequential layers with list
_a : List[str] = re.match(lowerCAmelCase_ , lowerCAmelCase_ ).group(1 )
_a : Optional[Any] = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(lowerCAmelCase_ )//3}.linear.""" )
elif re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
_a : str = int(re.match(lowerCAmelCase_ , lowerCAmelCase_ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_a : Optional[Any] = 1 if projecton_layer == 0 else 2
_a : int = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
_a : str = value
_a : List[str] = mixed_qkv.size(0 ) // 3
_a : str = mixed_qkv[:qkv_dim]
_a : int = mixed_qkv[qkv_dim : qkv_dim * 2]
_a : Any = mixed_qkv[qkv_dim * 2 :]
_a : List[Any] = query_layer
_a : Union[str, Any] = key_layer
_a : Tuple = value_layer
else:
_a : Dict = value
return model_state_dict
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int:
_a , _a : Optional[Any] = init_clap(lowerCAmelCase_ , enable_fusion=lowerCAmelCase_ )
clap_model.eval()
_a : Tuple = clap_model.state_dict()
_a : Optional[int] = rename_state_dict(lowerCAmelCase_ )
_a : List[str] = ClapConfig()
_a : Tuple = enable_fusion
_a : int = ClapModel(lowerCAmelCase_ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
transformers_config.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
__lowerCAmelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 89 |
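The qkv branch above slices one fused projection matrix into three equal row blocks. A torch-free sketch of the same split, assuming the query, key, and value weights were concatenated row-wise as in the original CLAP checkpoint:

import numpy as np

def split_qkv(mixed_qkv: np.ndarray) -> tuple:
    # The fused projection stacks W_q, W_k, W_v along the output (row) dimension.
    qkv_dim = mixed_qkv.shape[0] // 3
    query = mixed_qkv[:qkv_dim]
    key = mixed_qkv[qkv_dim : 2 * qkv_dim]
    value = mixed_qkv[2 * qkv_dim :]
    return query, key, value

if __name__ == "__main__":
    fused = np.arange(6 * 4).reshape(6, 4)  # toy fused weight: three (2, 4) blocks
    q, k, v = split_qkv(fused)
    assert q.shape == k.shape == v.shape == (2, 4)
    assert (np.vstack([q, k, v]) == fused).all()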
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 1 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def __lowerCamelCase ( lowerCAmelCase_ ) -> str:
_a : Dict = VideoMAEConfig()
set_architecture_configs(lowerCAmelCase_ , lowerCAmelCase_ )
if "finetuned" not in model_name:
_a : List[str] = False
if "finetuned" in model_name:
_a : int = 'huggingface/label-files'
if "kinetics" in model_name:
_a : List[Any] = 400
_a : Optional[int] = 'kinetics400-id2label.json'
elif "ssv2" in model_name:
_a : Dict = 174
_a : Union[str, Any] = 'something-something-v2-id2label.json'
else:
raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
_a : Dict = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='dataset' ) , 'r' ) )
_a : Dict = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
_a : List[Any] = idalabel
_a : Dict = {v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
if "small" in model_name:
_a : Tuple = 384
_a : Optional[Any] = 1536
_a : Optional[Any] = 12
_a : Tuple = 16
_a : List[str] = 12
_a : List[str] = 3
_a : Optional[Any] = 192
_a : List[str] = 768
elif "large" in model_name:
_a : Dict = 1024
_a : Dict = 4096
_a : Tuple = 24
_a : Any = 16
_a : Any = 12
_a : Optional[Any] = 8
_a : List[str] = 512
_a : Union[str, Any] = 2048
elif "huge" in model_name:
_a : List[str] = 1280
_a : Dict = 5120
_a : List[Any] = 32
_a : List[Any] = 16
_a : Dict = 12
_a : Any = 8
_a : Dict = 640
_a : Dict = 2560
elif "base" not in model_name:
raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def __lowerCamelCase ( lowerCAmelCase_ ) -> List[Any]:
if "encoder." in name:
_a : List[Any] = name.replace('encoder.' , '' )
if "cls_token" in name:
_a : Dict = name.replace('cls_token' , 'videomae.embeddings.cls_token' )
if "decoder_pos_embed" in name:
_a : Dict = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
_a : List[Any] = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
_a : str = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_a : str = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
if "decoder.blocks" in name:
_a : List[Any] = name.replace('decoder.blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
_a : int = name.replace('blocks' , 'videomae.encoder.layer' )
if "attn.proj" in name:
_a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "bias" not in name:
_a : str = name.replace('attn' , 'attention.self' )
if "attn" in name:
_a : Optional[int] = name.replace('attn' , 'attention.attention' )
if "norm1" in name:
_a : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_a : List[Any] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_a : Dict = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_a : List[Any] = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
_a : Optional[int] = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
_a : List[str] = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
_a : Optional[int] = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
_a : Optional[int] = name.replace('norm.weight' , 'videomae.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
_a : int = name.replace('norm.bias' , 'videomae.layernorm.bias' )
if "head" in name and "decoder" not in name:
_a : Union[str, Any] = name.replace('head' , 'classifier' )
return name
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
for key in orig_state_dict.copy().keys():
_a : Tuple = orig_state_dict.pop(lowerCAmelCase_ )
if key.startswith('encoder.' ):
_a : Dict = key.replace('encoder.' , '' )
if "qkv" in key:
_a : int = key.split('.' )
if key.startswith('decoder.blocks' ):
_a : Optional[Any] = config.decoder_hidden_size
_a : List[str] = int(key_split[2] )
_a : str = 'decoder.decoder_layers.'
if "weight" in key:
_a : Tuple = val[:dim, :]
_a : Any = val[dim : dim * 2, :]
_a : Any = val[-dim:, :]
else:
_a : Any = config.hidden_size
_a : str = int(key_split[1] )
_a : Optional[int] = 'videomae.encoder.layer.'
if "weight" in key:
_a : Any = val[:dim, :]
_a : Optional[Any] = val[dim : dim * 2, :]
_a : Dict = val[-dim:, :]
else:
_a : List[Any] = val
return orig_state_dict
def __lowerCamelCase ( ) -> Optional[Any]:
_a : Tuple = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_a : str = np.load(lowerCAmelCase_ )
return list(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_a : Tuple = get_videomae_config(lowerCAmelCase_ )
if "finetuned" in model_name:
_a : Optional[int] = VideoMAEForVideoClassification(lowerCAmelCase_ )
else:
_a : str = VideoMAEForPreTraining(lowerCAmelCase_ )
# download original checkpoint, hosted on Google Drive
_a : Any = 'pytorch_model.bin'
gdown.cached_download(lowerCAmelCase_ , lowerCAmelCase_ , quiet=lowerCAmelCase_ )
_a : Optional[Any] = torch.load(lowerCAmelCase_ , map_location='cpu' )
if "model" in files:
_a : int = files['model']
else:
_a : Optional[int] = files['module']
_a : Any = convert_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
model.eval()
# verify model on basic input
_a : str = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
_a : List[Any] = prepare_video()
_a : Dict = image_processor(lowerCAmelCase_ , return_tensors='pt' )
if "finetuned" not in model_name:
_a : Dict = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
_a : Tuple = torch.load(lowerCAmelCase_ )
_a : int = model(**lowerCAmelCase_ )
_a : int = outputs.logits
_a : Any = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_a : Optional[int] = torch.Size([1, 400] )
_a : str = torch.tensor([-0.9_291, -0.4_061, -0.9_307] )
elif model_name == "videomae-small-finetuned-ssv2":
_a : Union[str, Any] = torch.Size([1, 174] )
_a : List[Any] = torch.tensor([0.2_671, -0.4_689, -0.8_235] )
elif model_name == "videomae-base":
_a : int = torch.Size([1, 1408, 1536] )
_a : Any = torch.tensor([[0.7_739, 0.7_968, 0.7_089], [0.6_701, 0.7_487, 0.6_209], [0.4_287, 0.5_158, 0.4_773]] )
elif model_name == "videomae-base-short":
_a : Optional[int] = torch.Size([1, 1408, 1536] )
_a : Tuple = torch.tensor([[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_a : Dict = torch.tensor([0.5_142] ) if config.norm_pix_loss else torch.tensor([0.6_469] )
elif model_name == "videomae-large":
_a : str = torch.Size([1, 1408, 1536] )
_a : int = torch.tensor([[0.7_149, 0.7_997, 0.6_966], [0.6_768, 0.7_869, 0.6_948], [0.5_139, 0.6_221, 0.5_605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_a : Optional[int] = torch.Size([1, 400] )
_a : Dict = torch.tensor([0.0_771, 0.0_011, -0.3_625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_a : Any = torch.Size([1, 400] )
_a : List[Any] = torch.tensor([0.2_433, 0.1_632, -0.4_894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_a : Optional[Any] = torch.Size([1, 400] )
_a : Any = torch.tensor([0.6_588, 0.0_990, -0.2_493] )
elif model_name == "videomae-base-finetuned-kinetics":
_a : List[str] = torch.Size([1, 400] )
_a : Union[str, Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] )
elif model_name == "videomae-base-short-ssv2":
_a : str = torch.Size([1, 1408, 1536] )
_a : Optional[int] = torch.tensor([[0.4_712, 0.5_296, 0.5_786], [0.2_278, 0.2_729, 0.4_026], [0.0_352, 0.0_730, 0.2_506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_a : Optional[int] = torch.Size([1, 174] )
_a : Union[str, Any] = torch.tensor([-0.0_537, -0.1_539, -0.3_266] )
elif model_name == "videomae-base-ssv2":
_a : List[Any] = torch.Size([1, 1408, 1536] )
_a : Optional[Any] = torch.tensor([[0.8_131, 0.8_727, 0.8_546], [0.7_366, 0.9_377, 0.8_870], [0.5_935, 0.8_874, 0.8_564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_a : List[Any] = torch.Size([1, 174] )
_a : str = torch.tensor([0.1_961, -0.8_337, -0.6_389] )
else:
raise ValueError(f"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1E-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase_ , atol=1E-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
_a : List[str] = outputs.loss
assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(lowerCAmelCase_ , organization='nielsr' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowerCAmelCase = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 89 |
'''simple docstring'''
import math
def __lowerCamelCase ( lowerCAmelCase_ ) -> bool:
    _a : Optional[int] = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ = 1 / 12345 ) -> int:
_a : int = 0
_a : Optional[Any] = 0
_a : int = 3
while True:
_a : Tuple = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(lowerCAmelCase_ ):
_a : Union[str, Any] = int(lowerCAmelCase_ )
total_partitions += 1
if check_partition_perfect(lowerCAmelCase_ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(lowerCAmelCase_ )
integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 89 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False ) -> Union[str, Any]:
_a : List[str] = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('head' ):
_a : int = 'segformer.encoder.' + key
if key.startswith('backbone' ):
_a : int = key.replace('backbone' , 'segformer.encoder' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_a : List[Any] = key[key.find('patch_embed' ) + len('patch_embed' )]
_a : Union[str, Any] = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowerCAmelCase_ )-1}""" )
if "norm" in key:
_a : int = key.replace('norm' , 'layer_norm' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_a : List[Any] = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
_a : str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowerCAmelCase_ )-1}""" )
if "layer_norm1" in key:
_a : str = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
_a : Optional[int] = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
_a : List[Any] = key[key.find('block' ) + len('block' )]
_a : List[Any] = key.replace(f"""block{idx}""" , f"""block.{int(lowerCAmelCase_ )-1}""" )
if "attn.q" in key:
_a : List[Any] = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
_a : str = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
_a : Any = key.replace('attn' , 'attention.self' )
if "fc1" in key:
_a : Union[str, Any] = key.replace('fc1' , 'dense1' )
if "fc2" in key:
_a : Tuple = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
_a : Optional[int] = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
_a : List[str] = key.replace('linear_fuse.conv' , 'linear_fuse' )
_a : List[str] = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_a : Tuple = key[key.find('linear_c' ) + len('linear_c' )]
_a : Union[str, Any] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowerCAmelCase_ )-1}""" )
if key.startswith('head' ):
_a : Any = key.replace('head' , 'classifier' )
_a : List[str] = value
return new_state_dict
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_a : Dict = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
_a : Any = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
_a : Any = kv_weight[
: config.hidden_sizes[i], :
]
_a : Optional[Any] = kv_bias[: config.hidden_sizes[i]]
_a : List[Any] = kv_weight[
config.hidden_sizes[i] :, :
]
_a : Union[str, Any] = kv_bias[
config.hidden_sizes[i] :
]
def __lowerCamelCase ( ) -> Tuple:
_a : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_a : Any = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return image
@torch.no_grad()
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_a : str = SegformerConfig()
_a : Optional[int] = False
# set attributes based on model_name
_a : Optional[int] = 'huggingface/label-files'
if "segformer" in model_name:
_a : List[str] = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
_a : List[str] = 150
_a : Dict = 'ade20k-id2label.json'
_a : Optional[Any] = (1, 150, 128, 128)
elif "city" in model_name:
_a : Dict = 19
_a : Union[str, Any] = 'cityscapes-id2label.json'
_a : Dict = (1, 19, 128, 128)
else:
raise ValueError(f"""Model {model_name} not supported""" )
elif "mit" in model_name:
_a : List[str] = True
_a : Dict = model_name[4:6]
_a : List[Any] = 1000
_a : Any = 'imagenet-1k-id2label.json'
_a : Union[str, Any] = (1, 1000)
else:
raise ValueError(f"""Model {model_name} not supported""" )
# set config attributes
_a : List[str] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='dataset' ) , 'r' ) )
_a : Optional[Any] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
_a : Any = idalabel
_a : List[Any] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
_a : Tuple = [64, 128, 320, 512]
_a : Any = 256
elif size == "b2":
_a : int = [64, 128, 320, 512]
_a : int = 768
_a : Tuple = [3, 4, 6, 3]
elif size == "b3":
_a : List[str] = [64, 128, 320, 512]
_a : List[Any] = 768
_a : Tuple = [3, 4, 18, 3]
elif size == "b4":
_a : List[Any] = [64, 128, 320, 512]
_a : List[Any] = 768
_a : str = [3, 8, 27, 3]
elif size == "b5":
_a : Optional[int] = [64, 128, 320, 512]
_a : Dict = 768
_a : Tuple = [3, 6, 40, 3]
else:
raise ValueError(f"""Size {size} not supported""" )
# load image processor (only resize + normalize)
_a : List[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowerCAmelCase_ , align=lowerCAmelCase_ , do_random_crop=lowerCAmelCase_ )
# prepare image
_a : List[Any] = prepare_img()
_a : Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors='pt' ).pixel_values
logger.info(f"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
_a : List[Any] = torch.load(lowerCAmelCase_ , map_location=torch.device('cpu' ) )
else:
_a : Optional[Any] = torch.load(lowerCAmelCase_ , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
_a : Union[str, Any] = rename_keys(lowerCAmelCase_ , encoder_only=lowerCAmelCase_ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(lowerCAmelCase_ , lowerCAmelCase_ )
# create HuggingFace model and load state dict
if encoder_only:
_a : Tuple = False
_a : List[Any] = SegformerForImageClassification(lowerCAmelCase_ )
else:
_a : Union[str, Any] = SegformerForSemanticSegmentation(lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
model.eval()
# forward pass
_a : Union[str, Any] = model(lowerCAmelCase_ )
_a : List[Any] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
_a : List[str] = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
_a : Union[str, Any] = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
_a : str = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
_a : Tuple = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
_a : List[Any] = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
_a : int = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
_a : str = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
_a : Tuple = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
_a : Any = torch.tensor(
[
[
[-1.1_372E01, -1.2_787E01, -1.3_477E01],
[-1.2_536E01, -1.4_194E01, -1.4_409E01],
[-1.3_217E01, -1.4_888E01, -1.5_327E01],
],
[
[-1.4_791E01, -1.7_122E01, -1.8_277E01],
[-1.7_163E01, -1.9_192E01, -1.9_533E01],
[-1.7_897E01, -1.9_991E01, -2.0_315E01],
],
[
[7.6_723E-01, 4.1_921E-01, -7.7_878E-02],
[4.7_772E-01, 9.5_557E-03, -2.8_082E-01],
[3.6_032E-01, -2.4_826E-01, -5.1_168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
_a : Union[str, Any] = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
_a : Dict = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
_a : Any = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
_a : Any = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
_a : Dict = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
_a : str = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
_a : int = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , lowerCAmelCase_ , atol=1E-2 )
# finally, save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__lowerCAmelCase = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 89 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=1 ) -> Dict:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Tuple:
_a : Any = []
for old_item in old_list:
_a : Union[str, Any] = old_item.replace('in_layers.0' , 'norm1' )
_a : Optional[int] = new_item.replace('in_layers.2' , 'conv1' )
_a : str = new_item.replace('out_layers.0' , 'norm2' )
_a : List[str] = new_item.replace('out_layers.3' , 'conv2' )
_a : str = new_item.replace('emb_layers.1' , 'time_emb_proj' )
_a : Tuple = new_item.replace('skip_connection' , 'conv_shortcut' )
_a : Any = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Any:
_a : List[str] = []
for old_item in old_list:
_a : List[Any] = old_item
_a : Optional[int] = new_item.replace('norm.weight' , 'group_norm.weight' )
_a : Optional[Any] = new_item.replace('norm.bias' , 'group_norm.bias' )
_a : Any = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
_a : Optional[Any] = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
_a : Optional[int] = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Any:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_a : Optional[Any] = old_checkpoint[path]
_a : Optional[Any] = old_tensor.shape[0] // 3
_a : Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_a : int = old_tensor.shape[0] // config['num_head_channels'] // 3
_a : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_a , _a , _a : Tuple = old_tensor.split(channels // num_heads , dim=1 )
_a : Dict = query.reshape(lowerCAmelCase_ )
_a : str = key.reshape(lowerCAmelCase_ )
_a : Optional[int] = value.reshape(lowerCAmelCase_ )
for path in paths:
_a : Dict = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_a : Any = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
_a : str = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
_a : Union[str, Any] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
_a : int = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_a : List[str] = old_checkpoint[path['old']][:, :, 0]
else:
_a : Dict = old_checkpoint[path['old']]
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_a : Optional[int] = {}
_a : Dict = checkpoint['time_embed.0.weight']
_a : Tuple = checkpoint['time_embed.0.bias']
_a : Union[str, Any] = checkpoint['time_embed.2.weight']
_a : List[str] = checkpoint['time_embed.2.bias']
_a : List[str] = checkpoint['input_blocks.0.0.weight']
_a : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_a : Optional[int] = checkpoint['out.0.weight']
_a : int = checkpoint['out.0.bias']
_a : List[str] = checkpoint['out.2.weight']
_a : Optional[int] = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
_a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
_a : Dict = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
# Retrieves the keys for the middle blocks only
_a : List[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
_a : Union[str, Any] = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
# Retrieves the keys for the output blocks only
_a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
_a : str = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
for i in range(1 , lowerCAmelCase_ ):
_a : List[Any] = (i - 1) // (config['num_res_blocks'] + 1)
_a : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1)
_a : Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
_a : List[Any] = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
_a : Union[str, Any] = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
_a : List[str] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
_a : Optional[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path, resnet_op] , config=lowerCAmelCase_ )
if len(lowerCAmelCase_ ):
_a : List[str] = renew_attention_paths(lowerCAmelCase_ )
_a : List[Any] = {
'old': f"""input_blocks.{i}.1""",
'new': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : Optional[Any] = {
f"""input_blocks.{i}.1.qkv.bias""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ , )
_a : str = middle_blocks[0]
_a : Tuple = middle_blocks[1]
_a : Any = middle_blocks[2]
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : int = renew_attention_paths(lowerCAmelCase_ )
_a : int = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ )
for i in range(lowerCAmelCase_ ):
_a : List[str] = i // (config['num_res_blocks'] + 1)
_a : Any = i % (config['num_res_blocks'] + 1)
_a : Union[str, Any] = [shave_segments(lowerCAmelCase_ , 2 ) for name in output_blocks[i]]
_a : Optional[Any] = {}
for layer in output_block_layers:
_a , _a : str = layer.split('.' )[0], shave_segments(lowerCAmelCase_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(lowerCAmelCase_ )
else:
_a : str = [layer_name]
if len(lowerCAmelCase_ ) > 1:
_a : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
_a : Dict = renew_resnet_paths(lowerCAmelCase_ )
_a : str = renew_resnet_paths(lowerCAmelCase_ )
_a : Optional[int] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , config=lowerCAmelCase_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_a : List[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
_a : Tuple = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
_a : List[str] = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(lowerCAmelCase_ ) == 2:
_a : Union[str, Any] = []
if len(lowerCAmelCase_ ):
_a : Tuple = renew_attention_paths(lowerCAmelCase_ )
_a : str = {
'old': f"""output_blocks.{i}.1""",
'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : List[Any] = {
f"""output_blocks.{i}.1.qkv.bias""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=lowerCAmelCase_ , )
else:
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_a : int = '.'.join(['output_blocks', str(lowerCAmelCase_ ), path['old']] )
_a : Union[str, Any] = '.'.join(['up_blocks', str(lowerCAmelCase_ ), 'resnets', str(lowerCAmelCase_ ), path['new']] )
_a : Union[str, Any] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__lowerCAmelCase = json.loads(f.read())
__lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__lowerCAmelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
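# Example invocation (illustrative only; script name and paths are hypothetical):
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted-model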
| 89 | 1 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__lowerCAmelCase = open # noqa: we just need to have a builtin inside this module to test it properly
| 89 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> np.array:
_a : Optional[int] = f"""{sampling_rate}"""
_a : Any = '1'
_a : Optional[int] = 'f32le'
_a : Any = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(lowerCAmelCase_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
_a : int = ffmpeg_process.communicate(lowerCAmelCase_ )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
_a : int = output_stream[0]
_a : List[str] = np.frombuffer(lowerCAmelCase_ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
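# Usage sketch for the decoder above (illustrative; in this file the helper is
# obfuscated as __lowerCamelCase, and ffmpeg must be installed and on PATH):
#
#   with open("sample.flac", "rb") as f:         # hypothetical file name
#       audio = ffmpeg_read(f.read(), 16_000)    # float32 waveform at 16 kHz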
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = "f32le" , ) -> Union[str, Any]:
_a : List[str] = f"""{sampling_rate}"""
_a : List[str] = '1'
if format_for_conversion == "s16le":
_a : List[Any] = 2
elif format_for_conversion == "f32le":
_a : Dict = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Any = platform.system()
if system == "Linux":
_a : Union[str, Any] = 'alsa'
_a : Union[str, Any] = 'default'
elif system == "Darwin":
_a : Any = 'avfoundation'
_a : Optional[int] = ':0'
elif system == "Windows":
_a : str = 'dshow'
_a : Tuple = 'default'
_a : str = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_a : Union[str, Any] = _ffmpeg_stream(lowerCAmelCase_ , lowerCAmelCase_ )
for item in iterator:
yield item
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "f32le" , ) -> str:
if stream_chunk_s is not None:
_a : str = stream_chunk_s
else:
_a : List[str] = chunk_length_s
_a : int = ffmpeg_microphone(lowerCAmelCase_ , lowerCAmelCase_ , format_for_conversion=lowerCAmelCase_ )
if format_for_conversion == "s16le":
_a : Optional[Any] = np.intaa
_a : List[Any] = 2
elif format_for_conversion == "f32le":
_a : Tuple = np.floataa
_a : Any = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_a : str = chunk_length_s / 6
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCAmelCase_ , (int, float) ):
_a : List[str] = [stride_length_s, stride_length_s]
_a : str = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_a : List[str] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_a : Any = datetime.datetime.now()
_a : Dict = datetime.timedelta(seconds=lowerCAmelCase_ )
for item in chunk_bytes_iter(lowerCAmelCase_ , lowerCAmelCase_ , stride=(stride_left, stride_right) , stream=lowerCAmelCase_ ):
# Put everything back in numpy scale
_a : List[Any] = np.frombuffer(item['raw'] , dtype=lowerCAmelCase_ )
_a : List[str] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
_a : Union[str, Any] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're running behind real time, so skip this chunk
continue
yield item
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False ) -> List[Any]:
_a : Tuple = B''
_a , _a : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
_a : Optional[int] = 0
for raw in iterator:
acc += raw
if stream and len(lowerCAmelCase_ ) < chunk_len:
_a : str = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCAmelCase_ ) >= chunk_len:
# We are flushing the accumulator
_a : Union[str, Any] = (_stride_left, stride_right)
_a : Dict = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
_a : List[str] = False
yield item
_a : int = stride_left
_a : List[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCAmelCase_ ) > stride_left:
_a : str = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
_a : str = False
yield item
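# Worked example of the striding above (illustrative): with chunk_len=6 and
# stride=(2, 2), a 10-byte input yields bytes 0..5 with stride (0, 2), then
# bytes 2..7 and 4..9 with stride (2, 2), and finally the 4-byte tail with
# stride (2, 0); neighbouring chunks overlap so a consumer can discard the
# strided edges when stitching results back together.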
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_a : Optional[Any] = 2**24 # 16 MB read buffer
try:
with subprocess.Popen(lowerCAmelCase_ , stdout=subprocess.PIPE , bufsize=lowerCAmelCase_ ) as ffmpeg_process:
while True:
_a : Any = ffmpeg_process.stdout.read(lowerCAmelCase_ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 89 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar('''T''')
class __magic_name__ ( Generic[T] ):
def __init__( self : Union[str, Any] ,_UpperCAmelCase : T ):
_a : str = data
_a : Node[T] | None = None
def __str__( self : List[str] ):
return F"""{self.data}"""
class __magic_name__ ( Generic[T] ):
def __init__( self : Union[str, Any] ):
_a : Node[T] | None = None
def __iter__( self : Dict ):
_a : List[Any] = self.top
while node:
yield node.data
_a : Union[str, Any] = node.next
def __str__( self : Dict ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self : Optional[Any] ):
return len(tuple(iter(self ) ) )
def __lowercase ( self : List[Any] ):
return self.top is None
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : T ):
_a : Union[str, Any] = Node(_UpperCAmelCase )
if not self.is_empty():
_a : Any = self.top
_a : List[Any] = node
def __lowercase ( self : Tuple ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top ,_UpperCAmelCase )
_a : Dict = self.top
_a : str = self.top.next
return pop_node.data
def __lowercase ( self : str ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def __lowercase ( self : List[Any] ):
_a : Dict = None
if __name__ == "__main__":
from doctest import testmod
testmod()
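# Usage sketch (illustrative -- in this file the stack class is obfuscated as
# __magic_name__, and its push() references a `Node` name that the obfuscation
# renamed away, so the conceptual API is shown as comments only):
#
#   stack = Stack[int]()
#   stack.push(1); stack.push(2)
#   assert stack.peek() == 2 and stack.pop() == 2 and len(stack) == 1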
| 89 |
'''simple docstring'''
__lowerCAmelCase = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : List[Any] = set()
# keep track of all the paths to be checked
_a : Any = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
_a : Tuple = queue.pop(0 )
# get the last node from the path
_a : Tuple = path[-1]
if node not in explored:
_a : Optional[Any] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
_a : Any = list(lowerCAmelCase_ )
new_path.append(lowerCAmelCase_ )
queue.append(lowerCAmelCase_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(lowerCAmelCase_ )
# no path exists between the two nodes
return []
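# Example trace (illustrative): bfs_shortest_path(demo_graph, 'G', 'D') expands
# G, then C, then A, and returns ['G', 'C', 'A', 'B', 'D'] -- a 5-node path,
# i.e. distance 4, matching bfs_shortest_path_distance below.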
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
_a : Optional[int] = [start]
_a : Dict = set(lowerCAmelCase_ )
# Keep tabs on distances from the `start` node.
_a : Dict = {start: 0, target: -1}
while queue:
_a : List[str] = queue.pop(0 )
if node == target:
_a : Any = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(lowerCAmelCase_ )
queue.append(lowerCAmelCase_ )
_a : Any = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 89 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = False
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
__lowerCAmelCase = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
__lowerCAmelCase = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
__lowerCAmelCase = reader.read()
__lowerCAmelCase = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
__lowerCAmelCase = UNetaDModel(**config)
else:
__lowerCAmelCase = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
__lowerCAmelCase = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__lowerCAmelCase = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__lowerCAmelCase = config[key]
del config[key]
__lowerCAmelCase = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
__lowerCAmelCase = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
__lowerCAmelCase = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
__lowerCAmelCase = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
__lowerCAmelCase = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
__lowerCAmelCase = param_value
__lowerCAmelCase = True
if not has_changed:
__lowerCAmelCase = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
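# Example invocation (illustrative path; note that --dump_path is parsed as
# required but the script above writes back into --repo_path/<subfolder>):
#   python rename_unet_config.py --repo_path ./my-unet-repo --dump_path ./my-unet-repo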
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
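# Usage note (illustrative): with the _LazyModule indirection above, importing
# this package is cheap; the torch- or TF-backed submodules are only loaded
# when one of their attributes (e.g. SwinModel) is first accessed.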
| 89 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __magic_name__ :
def __init__( self : List[str] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Any=13 ,_UpperCAmelCase : Optional[Any]=7 ,_UpperCAmelCase : List[Any]=True ,_UpperCAmelCase : List[Any]=True ,_UpperCAmelCase : Dict=True ,_UpperCAmelCase : List[str]=True ,_UpperCAmelCase : int=99 ,_UpperCAmelCase : int=32 ,_UpperCAmelCase : str=2 ,_UpperCAmelCase : str=4 ,_UpperCAmelCase : Optional[int]=37 ,_UpperCAmelCase : Union[str, Any]="gelu" ,_UpperCAmelCase : str=0.1 ,_UpperCAmelCase : Optional[Any]=0.1 ,_UpperCAmelCase : Optional[Any]=512 ,_UpperCAmelCase : Any=16 ,_UpperCAmelCase : int=2 ,_UpperCAmelCase : Dict=0.02 ,_UpperCAmelCase : int=False ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Tuple="None" ,_UpperCAmelCase : Optional[Any]=3 ,_UpperCAmelCase : Dict=4 ,_UpperCAmelCase : Dict=None ,):
_a : str = parent
_a : str = batch_size
_a : int = seq_length
_a : Tuple = is_training
_a : Dict = use_input_mask
_a : Optional[int] = use_token_type_ids
_a : str = use_labels
_a : Optional[Any] = vocab_size
_a : Any = hidden_size
_a : Union[str, Any] = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : int = intermediate_size
_a : Union[str, Any] = hidden_act
_a : int = hidden_dropout_prob
_a : Dict = attention_probs_dropout_prob
_a : Optional[int] = max_position_embeddings
_a : Tuple = type_vocab_size
_a : List[str] = type_sequence_label_size
_a : Tuple = initializer_range
_a : int = num_labels
_a : Tuple = num_choices
_a : List[Any] = relative_attention
_a : Optional[int] = position_biased_input
_a : Union[str, Any] = pos_att_type
_a : List[Any] = scope
def __lowercase ( self : List[Any] ):
_a : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_a : Optional[int] = None
if self.use_input_mask:
_a : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_a : Tuple = None
if self.use_token_type_ids:
_a : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_a : List[Any] = None
_a : Optional[Any] = None
_a : Optional[int] = None
if self.use_labels:
_a : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_a : Dict = DebertaVaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,initializer_range=self.initializer_range ,return_dict=_UpperCAmelCase ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : str ):
_a : List[Any] = TFDebertaVaModel(config=_UpperCAmelCase )
_a : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_a : Optional[Any] = [input_ids, input_mask]
_a : str = model(_UpperCAmelCase )
_a : Any = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : List[Any] ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : str ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Any ):
_a : Optional[Any] = TFDebertaVaForMaskedLM(config=_UpperCAmelCase )
_a : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_a : Optional[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : Any ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Any ,_UpperCAmelCase : str ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : List[str] ):
_a : Any = self.num_labels
_a : Union[str, Any] = TFDebertaVaForSequenceClassification(config=_UpperCAmelCase )
_a : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_a : Dict = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Any ):
_a : Tuple = self.num_labels
_a : Dict = TFDebertaVaForTokenClassification(config=_UpperCAmelCase )
_a : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_a : Union[str, Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : List[str] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ):
_a : List[str] = TFDebertaVaForQuestionAnswering(config=_UpperCAmelCase )
_a : List[str] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_a : str = model(_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowercase ( self : Union[str, Any] ):
_a : Dict = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) : Tuple = config_and_inputs
_a : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase : Any = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase : str = False
lowerCAmelCase : int = False
def __lowercase ( self : Dict ):
_a : str = TFDebertaVaModelTester(self )
_a : Tuple = ConfigTester(self ,config_class=_UpperCAmelCase ,hidden_size=37 )
def __lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[int] ):
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : List[str] ):
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def __lowercase ( self : List[Any] ):
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def __lowercase ( self : List[Any] ):
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def __lowercase ( self : List[str] ):
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def __lowercase ( self : Optional[int] ):
_a : Any = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
self.assertIsNotNone(_UpperCAmelCase )
@require_tf
class __magic_name__ ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __lowercase ( self : List[str] ):
pass
@slow
def __lowercase ( self : int ):
_a : str = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
_a : List[Any] = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_a : Union[str, Any] = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_a : Tuple = model(_UpperCAmelCase ,attention_mask=_UpperCAmelCase )[0]
_a : Tuple = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] ,_UpperCAmelCase ,atol=1E-4 )
| 89 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = BarthezTokenizer
lowerCAmelCase : int = BarthezTokenizerFast
lowerCAmelCase : Dict = True
lowerCAmelCase : str = True
def __lowercase ( self : List[Any] ):
super().setUp()
_a : List[Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname ,legacy_format=_UpperCAmelCase )
_a : Union[str, Any] = tokenizer
def __lowercase ( self : Tuple ):
_a : Optional[Any] = '<pad>'
_a : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) ,_UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) ,_UpperCAmelCase )
def __lowercase ( self : str ):
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'<mask>' )
self.assertEqual(len(_UpperCAmelCase ) ,101122 )
def __lowercase ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size ,101122 )
@require_torch
def __lowercase ( self : Dict ):
_a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : Dict = [0, 57, 3018, 70307, 91, 2]
_a : Dict = self.tokenizer(
_UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual((2, 6) ,batch.input_ids.shape )
self.assertEqual((2, 6) ,batch.attention_mask.shape )
_a : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
_a : str = self.get_tokenizer()
_a : List[str] = self.get_rust_tokenizer()
_a : Dict = 'I was born in 92000, and this is falsé.'
_a : List[Any] = tokenizer.tokenize(_UpperCAmelCase )
_a : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Union[str, Any] = self.get_rust_tokenizer()
_a : Any = tokenizer.encode(_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
@slow
def __lowercase ( self : Optional[int] ):
# fmt: off
_a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
_a : Optional[Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
| 89 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __lowerCamelCase ( lowerCAmelCase_ ) -> tuple[np.ndarray, np.ndarray]:
_a , _a : List[Any] = np.shape(lowerCAmelCase_ )
if rows != columns:
_a : Any = (
'\'table\' has to be a square-shaped array but got a '
f"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(lowerCAmelCase_ )
_a : str = np.zeros((rows, columns) )
_a : Dict = np.zeros((rows, columns) )
for i in range(lowerCAmelCase_ ):
for j in range(lowerCAmelCase_ ):
_a : Any = sum(lower[i][k] * upper[k][j] for k in range(lowerCAmelCase_ ) )
if upper[j][j] == 0:
raise ArithmeticError('No LU decomposition exists' )
_a : Union[str, Any] = (table[i][j] - total) / upper[j][j]
_a : int = 1
for j in range(lowerCAmelCase_ , lowerCAmelCase_ ):
_a : Tuple = sum(lower[i][k] * upper[k][j] for k in range(lowerCAmelCase_ ) )
_a : List[Any] = table[i][j] - total
return lower, upper
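# Readable reference version (added for clarity; an illustrative
# reimplementation of the Doolittle scheme above with descriptive names -- not
# part of the original module's public API):
def lu_decomposition_reference(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        raise ValueError(f"'table' has to be a square-shaped array but got a {rows}x{columns} array")
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # fill row i of the lower-triangular factor
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # fill row i of the upper-triangular factor
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
# e.g. lu_decomposition_reference(np.array([[2.0, 5.0], [6.0, 18.0]]))
# returns L = [[1, 0], [3, 1]] and U = [[2, 5], [0, 3]]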
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( _UpperCamelCase ):
@require_torch
def __lowercase ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded; inside pytest it is already too late, so we change it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Optional[int] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : List[str] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Tuple = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : List[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : Any ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Dict = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : Optional[int] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : int = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : str = self.get_env()
_a : Optional[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded; inside pytest it is already too late, so we change it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_a : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_a : str = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_a : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Dict = self.get_env()
_a : int = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
_a : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : int ):
_a : Optional[Any] = '\nfrom transformers import pipeline\n '
_a : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_a : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_a : List[Any] = self.get_env()
_a : Dict = '1'
_a : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_a : str = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def __lowercase ( self : int ):
_a : Optional[int] = '\nfrom transformers import AutoModel\n '
_a : List[Any] = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_a : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Tuple = self.get_env()
_a : List[str] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : Optional[Any] = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
| 89 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : List[str] = 'camembert'
def __init__( self : Dict ,_UpperCAmelCase : Any=30522 ,_UpperCAmelCase : Any=768 ,_UpperCAmelCase : Optional[int]=12 ,_UpperCAmelCase : str=12 ,_UpperCAmelCase : List[str]=3072 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Any=0.1 ,_UpperCAmelCase : Tuple=0.1 ,_UpperCAmelCase : Optional[int]=512 ,_UpperCAmelCase : Dict=2 ,_UpperCAmelCase : List[Any]=0.02 ,_UpperCAmelCase : Union[str, Any]=1E-12 ,_UpperCAmelCase : List[Any]=1 ,_UpperCAmelCase : str=0 ,_UpperCAmelCase : Tuple=2 ,_UpperCAmelCase : int="absolute" ,_UpperCAmelCase : Tuple=True ,_UpperCAmelCase : Optional[int]=None ,**_UpperCAmelCase : List[str] ,):
super().__init__(pad_token_id=_UpperCAmelCase ,bos_token_id=_UpperCAmelCase ,eos_token_id=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Any = vocab_size
_a : Optional[Any] = hidden_size
_a : List[Any] = num_hidden_layers
_a : str = num_attention_heads
_a : List[str] = hidden_act
_a : str = intermediate_size
_a : List[Any] = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : List[Any] = max_position_embeddings
_a : Union[str, Any] = type_vocab_size
_a : str = initializer_range
_a : Tuple = layer_norm_eps
_a : Union[str, Any] = position_embedding_type
_a : List[str] = use_cache
_a : Tuple = classifier_dropout
class __magic_name__ ( _UpperCamelCase ):
@property
def __lowercase ( self : str ):
if self.task == "multiple-choice":
_a : Any = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_a : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 89 |
'''simple docstring'''
def __lowerCamelCase ( ) -> Tuple:
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def __lowerCamelCase ( lowerCAmelCase_ ) -> List[Any]:
_a : Any = 1
_a : Tuple = 2
while i * i <= n:
_a : Tuple = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def __lowerCamelCase ( ) -> str:
return next(i for i in triangle_number_generator() if count_divisors(lowerCAmelCase_ ) > 500 )
if __name__ == "__main__":
print(solution())
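# Sanity check for count_divisors (illustrative): 28 = 2^2 * 7 has
# (2 + 1) * (1 + 1) = 6 divisors, and 28 is the first triangle number with
# more than 5 divisors -- the same machinery scales to the >500 target above.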
| 89 | 1 |
'''simple docstring'''
import string
def __lowerCamelCase ( lowerCAmelCase_ ) -> None:
for key in range(len(string.ascii_uppercase ) ):
_a : Union[str, Any] = ''
for symbol in message:
if symbol in string.ascii_uppercase:
_a : Optional[Any] = string.ascii_uppercase.find(lowerCAmelCase_ )
_a : List[str] = num - key
if num < 0:
_a : str = num + len(string.ascii_uppercase )
_a : int = translated + string.ascii_uppercase[num]
else:
_a : Dict = translated + symbol
print(f"""Decryption using Key #{key}: {translated}""" )
def __lowerCamelCase ( ) -> None:
_a : int = input('Encrypted message: ' )
_a : Tuple = message.upper()
decrypt(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 89 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : Optional[int] ,_UpperCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,**_UpperCAmelCase : Dict ,):
super().__init__(features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Tuple = Sql(
cache_dir=_UpperCAmelCase ,features=_UpperCAmelCase ,sql=_UpperCAmelCase ,con=_UpperCAmelCase ,**_UpperCAmelCase ,)
def __lowercase ( self : Dict ):
_a : Optional[Any] = None
_a : Dict = None
_a : Dict = None
_a : Optional[int] = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase ,download_mode=_UpperCAmelCase ,verification_mode=_UpperCAmelCase ,base_path=_UpperCAmelCase ,)
# Build dataset for splits
_a : List[str] = self.builder.as_dataset(
split='train' ,verification_mode=_UpperCAmelCase ,in_memory=self.keep_in_memory )
return dataset
class __magic_name__ :
def __init__( self : Optional[int] ,_UpperCAmelCase : Dataset ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Dict ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_a : Dict = dataset
_a : List[Any] = name
_a : Tuple = con
_a : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_a : List[Any] = num_proc
_a : Tuple = to_sql_kwargs
def __lowercase ( self : List[Any] ):
_a : Tuple = self.to_sql_kwargs.pop('sql' ,_UpperCAmelCase )
_a : str = self.to_sql_kwargs.pop('con' ,_UpperCAmelCase )
_a : Optional[Any] = self.to_sql_kwargs.pop('index' ,_UpperCAmelCase )
_a : Any = self._write(index=_UpperCAmelCase ,**self.to_sql_kwargs )
return written
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Dict ):
_a , _a , _a : Any = args
_a : Tuple = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
_a : Dict = query_table(
table=self.dataset.data ,key=slice(_UpperCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
_a : Tuple = batch.to_pandas()
_a : Dict = df.to_sql(self.name ,self.con ,index=_UpperCAmelCase ,**_UpperCAmelCase )
return num_rows or len(_UpperCAmelCase )
def __lowercase ( self : int ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : List[Any] ):
_a : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_a , _a : List[Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,_UpperCAmelCase ,_UpperCAmelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += num_rows
return written
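# Minimal usage sketch (illustrative; class names in this file are obfuscated,
# but this writer is what backs `Dataset.to_sql` in the `datasets` library):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   ds.to_sql("my_table", "sqlite:///data.db")   # hypothetical table name / URI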
| 89 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Dict = LEDTokenizer
lowerCAmelCase : List[str] = LEDTokenizerFast
lowerCAmelCase : List[Any] = True
def __lowercase ( self : List[str] ):
super().setUp()
_a : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_a : Tuple = dict(zip(_UpperCAmelCase ,range(len(_UpperCAmelCase ) ) ) )
_a : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_a : Tuple = {'unk_token': '<unk>'}
_a : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_a : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCAmelCase ) )
def __lowercase ( self : List[Any] ,**_UpperCAmelCase : List[Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**_UpperCAmelCase )
def __lowercase ( self : Dict ,**_UpperCAmelCase : str ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**_UpperCAmelCase )
def __lowercase ( self : Any ,_UpperCAmelCase : Any ):
return "lower newer", "lower newer"
@cached_property
def __lowercase ( self : str ):
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def __lowercase ( self : Optional[Any] ):
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def __lowercase ( self : List[str] ):
_a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : Optional[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : List[Any] = tokenizer(_UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_a : int = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
@require_torch
def __lowercase ( self : Union[str, Any] ):
_a : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : int = tokenizer(_UpperCAmelCase ,padding=_UpperCAmelCase ,return_tensors='pt' )
self.assertIn('input_ids' ,_UpperCAmelCase )
self.assertIn('attention_mask' ,_UpperCAmelCase )
self.assertNotIn('labels' ,_UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' ,_UpperCAmelCase )
@require_torch
def __lowercase ( self : str ):
_a : List[Any] = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : Dict = tokenizer(text_target=_UpperCAmelCase ,max_length=32 ,padding='max_length' ,return_tensors='pt' )
self.assertEqual(32 ,targets['input_ids'].shape[1] )
@require_torch
def __lowercase ( self : int ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : Any = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual(batch.input_ids.shape ,(2, 5122) )
@require_torch
def __lowercase ( self : List[str] ):
_a : int = ['A long paragraph for summarization.']
_a : int = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : Dict = tokenizer(_UpperCAmelCase ,return_tensors='pt' )
_a : List[str] = tokenizer(text_target=_UpperCAmelCase ,return_tensors='pt' )
_a : List[Any] = inputs['input_ids']
_a : Tuple = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowercase ( self : Union[str, Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a : Optional[Any] = ['Summary of the text.', 'Another summary.']
_a : int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a : List[Any] = tokenizer(_UpperCAmelCase ,padding=_UpperCAmelCase )
_a : int = [[0] * len(_UpperCAmelCase ) for x in encoded_output['input_ids']]
_a : List[Any] = tokenizer.pad(_UpperCAmelCase )
self.assertSequenceEqual(outputs['global_attention_mask'] ,_UpperCAmelCase )
def __lowercase ( self : List[str] ):
pass
def __lowercase ( self : int ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : Dict = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : int = self.tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : Optional[Any] = 'A, <mask> AllenNLP sentence.'
_a : int = tokenizer_r.encode_plus(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,return_token_type_ids=_UpperCAmelCase )
_a : List[Any] = tokenizer_p.encode_plus(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,return_token_type_ids=_UpperCAmelCase )
self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,)
_a : Any = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_a : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
_UpperCAmelCase ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_UpperCAmelCase ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 89 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> np.ndarray:
_a : Union[str, Any] = cva.getAffineTransform(lowerCAmelCase_ , lowerCAmelCase_ )
return cva.warpAffine(lowerCAmelCase_ , lowerCAmelCase_ , (rows, cols) )
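# Note (added for clarity): `cva` is this file's alias for OpenCV's `cv2`.
# getAffineTransform builds a 2x3 matrix from three point correspondences, so
# the "rotations" below are really general affine warps (rotation plus shear
# and translation), e.g. mapping (50,50)->(10,100) both shifts and skews.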
if __name__ == "__main__":
# read original image
__lowerCAmelCase = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# convert the image to grayscale
__lowerCAmelCase = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__lowerCAmelCase , __lowerCAmelCase = gray_img.shape
# set different points to rotate image
__lowerCAmelCase = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
__lowerCAmelCase = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
__lowerCAmelCase = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
__lowerCAmelCase = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
# add all rotated images in a list
__lowerCAmelCase = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__lowerCAmelCase = plt.figure(1)
__lowerCAmelCase = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
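# Hedged sanity sketch (assumes `cva` above resolves to OpenCV): the 2x3
# affine matrix A from getAffineTransform maps each source control point p
# via A @ [p_x, p_y, 1.0] exactly onto its destination point. Fresh arrays
# are used here because the script rebinds all of its own to one name.
_src_pts = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
_dst_pts = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
_affine = cva.getAffineTransform(_src_pts, _dst_pts)
for _p, _q in zip(_src_pts, _dst_pts):
assert np.allclose(_affine @ np.array([_p[0], _p[1], 1.0]), _q)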
| 89 | 1 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : Any ):
_a : str = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) ,1 )
self.assertEqual(x.component(2 ) ,3 )
_a : Union[str, Any] = Vector()
def __lowercase ( self : int ):
_a : List[Any] = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_UpperCAmelCase ) ,'(0,0,0,0,0,1)' )
def __lowercase ( self : Union[str, Any] ):
_a : Tuple = Vector([1, 2, 3, 4] )
self.assertEqual(len(_UpperCAmelCase ) ,4 )
def __lowercase ( self : Optional[int] ):
_a : Dict = Vector([1, 2] )
_a : Optional[Any] = Vector([1, 2, 3, 4, 5] )
_a : Tuple = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_a : Union[str, Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() ,2.2_36 ,3 )
self.assertAlmostEqual(y.euclidean_length() ,7.4_16 ,3 )
self.assertEqual(z.euclidean_length() ,0 )
self.assertAlmostEqual(w.euclidean_length() ,7.6_16 ,3 )
def __lowercase ( self : Any ):
_a : Dict = Vector([1, 2, 3] )
_a : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) ,2 )
self.assertEqual((x + y).component(1 ) ,3 )
self.assertEqual((x + y).component(2 ) ,4 )
def __lowercase ( self : Tuple ):
_a : str = Vector([1, 2, 3] )
_a : int = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) ,0 )
self.assertEqual((x - y).component(1 ) ,1 )
self.assertEqual((x - y).component(2 ) ,2 )
def __lowercase ( self : Dict ):
_a : Tuple = Vector([1, 2, 3] )
_a : int = Vector([2, -1, 4] ) # for test of dot product
_a : Dict = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) ,'(3.0,6.0,9.0)' )
self.assertEqual((a * b) ,0 )
def __lowercase ( self : List[Any] ):
self.assertEqual(str(zero_vector(10 ) ).count('0' ) ,10 )
def __lowercase ( self : Tuple ):
self.assertEqual(str(unit_basis_vector(3 ,1 ) ) ,'(0,1,0)' )
def __lowercase ( self : Optional[Any] ):
_a : Optional[Any] = Vector([1, 2, 3] )
_a : Any = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 ,_UpperCAmelCase ,_UpperCAmelCase ) ) ,'(3,4,7)' )
def __lowercase ( self : Optional[Any] ):
_a : Union[str, Any] = Vector([1, 0, 0, 0, 0, 0] )
_a : Union[str, Any] = x.copy()
self.assertEqual(str(_UpperCAmelCase ) ,str(_UpperCAmelCase ) )
def __lowercase ( self : Tuple ):
_a : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 ,0 )
x.change_component(1 ,1 )
self.assertEqual(str(_UpperCAmelCase ) ,'(0,1,0)' )
def __lowercase ( self : List[Any] ):
_a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' ,str(_UpperCAmelCase ) )
def __lowercase ( self : Dict ):
_a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
_a : Optional[int] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] ,a.minor(_UpperCAmelCase ,_UpperCAmelCase ) )
def __lowercase ( self : List[str] ):
_a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
_a : List[str] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] ,a.cofactor(_UpperCAmelCase ,_UpperCAmelCase ) )
def __lowercase ( self : List[str] ):
_a : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
self.assertEqual(-5 ,a.determinant() )
def __lowercase ( self : List[str] ):
_a : Union[str, Any] = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ,3 ,3 )
_a : Any = Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' ,str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' ,str(a * 2 ) )
def __lowercase ( self : List[Any] ):
_a : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
a.change_component(0 ,2 ,5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' ,str(_UpperCAmelCase ) )
def __lowercase ( self : List[str] ):
_a : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
self.assertAlmostEqual(7 ,a.component(2 ,1 ) ,delta=0.01 )
def __lowercase ( self : Tuple ):
_a : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
_a : Dict = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] ,3 ,3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' ,str(a + b ) )
def __lowercase ( self : Dict ):
_a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] ,3 ,3 )
_a : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] ,3 ,3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' ,str(a - b ) )
def __lowercase ( self : Dict ):
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' ,str(square_zero_matrix(5 ) ) ,)
if __name__ == "__main__":
unittest.main()
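# Hedged reference sketch (mirrors, not replaces, the imported lib.axpy): the
# test above expects axpy(2, (1,2,3), (1,0,1)) == (3,4,7), i.e. elementwise
# "a times x plus y".
def _axpy_reference(a: float, x: list, y: list) -> list:
return [a * xi + yi for xi, yi in zip(x, y)]
assert _axpy_reference(2, [1, 2, 3], [1, 0, 1]) == [3, 4, 7]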
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
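# Hedged usage sketch: with the _LazyModule indirection above, importing the
# package stays cheap and the torch-backed submodule is only loaded on first
# attribute access, e.g. (checkpoint name assumed, not verified here):
#
# from transformers import BigBirdPegasusForConditionalGeneration
# model = BigBirdPegasusForConditionalGeneration.from_pretrained(
# "google/bigbird-pegasus-large-arxiv"
# )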
| 89 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Any = 'xglm'
lowerCAmelCase : Optional[Any] = ['past_key_values']
lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 'attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[Any] ,_UpperCAmelCase : Dict=256008 ,_UpperCAmelCase : str=2048 ,_UpperCAmelCase : Optional[int]=1024 ,_UpperCAmelCase : int=4096 ,_UpperCAmelCase : List[Any]=24 ,_UpperCAmelCase : str=16 ,_UpperCAmelCase : Optional[Any]="gelu" ,_UpperCAmelCase : Dict=0.1 ,_UpperCAmelCase : str=0.1 ,_UpperCAmelCase : Optional[int]=0.0 ,_UpperCAmelCase : Any=0.0 ,_UpperCAmelCase : List[Any]=0.02 ,_UpperCAmelCase : int=True ,_UpperCAmelCase : Union[str, Any]=True ,_UpperCAmelCase : Dict=2 ,_UpperCAmelCase : Any=1 ,_UpperCAmelCase : List[str]=0 ,_UpperCAmelCase : Optional[Any]=2 ,**_UpperCAmelCase : Union[str, Any] ,):
_a : Optional[int] = vocab_size
_a : str = max_position_embeddings
_a : Any = d_model
_a : List[str] = ffn_dim
_a : Tuple = num_layers
_a : Optional[int] = attention_heads
_a : Dict = activation_function
_a : Optional[int] = dropout
_a : Union[str, Any] = attention_dropout
_a : List[Any] = activation_dropout
_a : Dict = layerdrop
_a : Any = init_std
_a : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
_a : Optional[Any] = use_cache
super().__init__(
pad_token_id=_UpperCAmelCase ,bos_token_id=_UpperCAmelCase ,eos_token_id=_UpperCAmelCase ,decoder_start_token_id=_UpperCAmelCase ,**_UpperCAmelCase ,)
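# Hedged usage sketch: attribute_map above aliases the canonical config names,
# so hidden_size / num_hidden_layers / num_attention_heads transparently
# resolve to d_model / num_layers / attention_heads (PretrainedConfig handles
# the indirection). Against the upstream XGLMConfig this corresponds to:
#
# cfg = XGLMConfig(d_model=512, num_layers=2, attention_heads=8)
# assert cfg.hidden_size == 512 and cfg.num_hidden_layers == 2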
| 89 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1024 , lowerCAmelCase_=1024 , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> List[Any]:
_a : str = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
_a : List[Any] = Seq2SeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='train' , **lowerCAmelCase_ )
_a : List[str] = tok.pad_token_id
def get_lens(lowerCAmelCase_ ):
_a : Dict = tqdm(
DataLoader(lowerCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=lowerCAmelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_a : Union[str, Any] = []
for batch in dl:
_a : Optional[Any] = batch['input_ids'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
_a : Optional[Any] = batch['labels'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
max_lens.append(max(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
max_lens.extend(lowerCAmelCase_ )
return max_lens
_a : str = get_lens(lowerCAmelCase_ )
_a : Optional[int] = Seq2SeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='val' , **lowerCAmelCase_ )
_a : Dict = get_lens(lowerCAmelCase_ )
pickle_save(lowerCAmelCase_ , train_ds.len_file )
pickle_save(lowerCAmelCase_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
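# Minimal illustration (assumes torch, a dependency of the DataLoader import
# above) of the per-row length computation inside get_lens: a sequence's
# length is its count of non-pad token ids.
import torch
_ids = torch.tensor([[5, 6, 0, 0], [7, 8, 9, 0]]) # pad_token_id == 0
assert _ids.ne(0).sum(1).tolist() == [2, 3]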
| 89 | 1 |
'''simple docstring'''
from __future__ import annotations
class __magic_name__ :
def __init__( self : List[str] ,_UpperCAmelCase : Union[str, Any]=None ):
_a : str = data
_a : Dict = None
def __repr__( self : str ):
_a : Any = []
_a : List[str] = self
while temp:
string_rep.append(F"""{temp.data}""" )
_a : int = temp.next
return "->".join(_UpperCAmelCase )
def __lowerCamelCase ( lowerCAmelCase_ ) -> Node:
if not lowerCAmelCase_:
raise Exception('The Elements List is empty' )
_a : Node = Node(lowerCAmelCase_[0] ) # head node, returned below
_current : Node = _a
for i in range(1 , len(lowerCAmelCase_ ) ):
_current.next = Node(lowerCAmelCase_[i] )
_current = _current.next
return _a
def __lowerCamelCase ( lowerCAmelCase_ ) -> None:
if lowerCAmelCase_ is not None and isinstance(lowerCAmelCase_ , Node ):
print_reverse(lowerCAmelCase_.next )
print(lowerCAmelCase_.data )
def __lowerCamelCase ( ) -> List[str]:
from doctest import testmod
testmod()
_a : str = make_linked_list([14, 52, 14, 12, 43] )
print('Linked List:' )
print(lowerCAmelCase_ )
print('Elements in Reverse:' )
print_reverse(lowerCAmelCase_ )
if __name__ == "__main__":
main()
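# Hedged alternative sketch: the recursive print_reverse above can hit the
# recursion limit on long lists; collecting the values first gives the same
# output iteratively.
def print_reverse_iterative(head_node: Node | None) -> None:
values = []
while head_node:
values.append(head_node.data)
head_node = head_node.next
print('\n'.join(str(v) for v in reversed(values)))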
| 89 |
'''simple docstring'''
from typing import Any
class __magic_name__ :
def __init__( self : List[Any] ,_UpperCAmelCase : Any ):
_a : List[Any] = data
_a : Union[str, Any] = None
def __repr__( self : Any ):
return F"""Node({self.data})"""
class __magic_name__ :
def __init__( self : int ):
_a : Tuple = None
def __iter__( self : str ):
_a : int = self.head
while _a:
yield _a.data
_a = _a.next
def __len__( self : Optional[Any] ):
return sum(1 for _ in self )
def __repr__( self : str ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __getitem__( self : Tuple ,_UpperCAmelCase : int ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Union[str, Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
_a : Any = self.head
for _ in range(_UpperCAmelCase ):
_a : Optional[Any] = current.next
_a : Optional[int] = data
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Any ):
self.insert_nth(len(self ) ,_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : Any ):
self.insert_nth(0 ,_UpperCAmelCase )
def __lowercase ( self : str ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
_a : int = Node(_UpperCAmelCase )
if self.head is None:
_a : str = new_node
elif index == 0:
_a : List[str] = self.head # link new_node to head
_a : Union[str, Any] = new_node
else:
_a : int = self.head
for _ in range(index - 1 ):
_a : Union[str, Any] = temp.next
_a : List[str] = temp.next
_a : Optional[int] = new_node
def __lowercase ( self : Optional[int] ): # print every node data
print(self )
def __lowercase ( self : str ):
return self.delete_nth(0 )
def __lowercase ( self : str ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def __lowercase ( self : List[str] ,_UpperCAmelCase : int = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
_a : Optional[Any] = self.head # default first node
if index == 0:
_a : int = self.head.next
else:
_a : int = self.head
for _ in range(index - 1 ):
_a : str = temp.next
_a : str = temp.next
_a : int = temp.next.next
return delete_node.data
def __lowercase ( self : List[Any] ):
return self.head is None
def __lowercase ( self : Tuple ):
_a : List[Any] = None
_a : Tuple = self.head
while current:
# Store the current node's next node.
_a : Dict = current.next
# Make the current node's next point backwards
_a : str = prev
# Make the previous node be the current node
_a : Tuple = current
# Make the current node the next node (to progress iteration)
_a : Optional[Any] = next_node
# Return prev in order to put the head at the end
_a : int = prev
def __lowerCamelCase ( ) -> None:
_a : List[str] = LinkedList()
assert linked_list.is_empty() is True
assert str(lowerCAmelCase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(lowerCAmelCase_ ) == i
linked_list.insert_nth(lowerCAmelCase_ , i + 1 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(lowerCAmelCase_ ) == 9
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
_a : Union[str, Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(-8 , 1 ) )
def __lowerCamelCase ( ) -> None:
_a : Dict = [
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
-192.55_555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
_a : List[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(lowerCAmelCase_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(lowerCAmelCase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
_a : List[str] = linked_list.delete_head()
assert result == -9
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
_a : Dict = linked_list.delete_tail()
assert result == 12.2
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
_a : Optional[Any] = linked_list.delete_nth(10 )
assert result is None
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(lowerCAmelCase_ )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(lowerCAmelCase_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __lowerCamelCase ( ) -> Union[str, Any]:
from doctest import testmod
testmod()
_a : Optional[int] = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(lowerCAmelCase_ )
print('\nReading/changing Node data using indexing:' )
print(f"""Element at Position 1: {linked_list[1]}""" )
_a : Optional[Any] = input('Enter New Value: ' ).strip()
print('New list:' )
print(lowerCAmelCase_ )
print(f"""length of linked_list is : {len(lowerCAmelCase_ )}""" )
if __name__ == "__main__":
main()
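# Hedged micro-example of reverse() (kept as a comment; written against the
# method names the class body itself calls, e.g. insert_tail): relinking
# flips a three-node list in O(n) time with O(1) extra space.
#
# ll = LinkedList()
# for v in (1, 2, 3):
# ll.insert_tail(v)
# ll.reverse()
# assert str(ll) == "3->2->1"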
| 89 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Tuple = 'Salesforce/blip-image-captioning-base'
lowerCAmelCase : Tuple = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
lowerCAmelCase : Optional[int] = 'image_captioner'
lowerCAmelCase : List[str] = AutoModelForVision2Seq
lowerCAmelCase : Tuple = ['image']
lowerCAmelCase : Optional[Any] = ['text']
def __init__( self : Dict ,*_UpperCAmelCase : List[Any] ,**_UpperCAmelCase : str ):
requires_backends(self ,['vision'] )
super().__init__(*_UpperCAmelCase ,**_UpperCAmelCase )
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : "Image" ):
return self.pre_processor(images=_UpperCAmelCase ,return_tensors='pt' )
def __lowercase ( self : List[str] ,_UpperCAmelCase : int ):
return self.model.generate(**_UpperCAmelCase )
def __lowercase ( self : int ,_UpperCAmelCase : Dict ):
return self.pre_processor.batch_decode(_UpperCAmelCase ,skip_special_tokens=_UpperCAmelCase )[0].strip()
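# Hedged usage sketch (triggers a model download, so kept as a comment; the
# tool class above is defined under the obfuscated name __magic_name__):
#
# from PIL import Image
# captioner = __magic_name__()
# print(captioner(Image.open('photo.jpg'))) # 'photo.jpg' is a placeholder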
| 89 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : str ,_UpperCAmelCase : Path ,_UpperCAmelCase : Union[str, None] = None ,_UpperCAmelCase : Union[List[str], None] = None ,_UpperCAmelCase : Union[str, List[str], None] = None ,_UpperCAmelCase : bool = True ,):
_a : Dict = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )]
if identifier is not None:
_a : str = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
for n_ in n_identifier:
_a : int = [file for file in files if n_ not in file]
else:
_a : Optional[Any] = [file for file in files if n_identifier not in file]
_a : Dict = ignore_files or []
ignore_files.append('__init__.py' )
_a : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' ,_UpperCAmelCase )
if only_modules:
_a : Any = file.split('.' )[0]
try:
_a : Optional[int] = getattr(_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict = doctest.DocTestSuite(_UpperCAmelCase )
_a : Optional[int] = unittest.TextTestRunner().run(_UpperCAmelCase )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
_a : str = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def __lowercase ( self : Union[str, Any] ):
_a : Optional[Any] = Path('src/transformers' )
_a : Optional[Any] = 'modeling'
_a : Union[str, Any] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase ,ignore_files=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : str = Path('src/transformers' )
_a : List[str] = 'tokenization'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Any = Path('src/transformers' )
_a : str = 'configuration'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : Tuple = Path('src/transformers' )
_a : Optional[int] = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(_UpperCAmelCase ,n_identifier=_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
_a : Union[str, Any] = Path('docs/source' )
_a : List[str] = ['favicon.ico']
self.analyze_directory(_UpperCAmelCase ,ignore_files=_UpperCAmelCase ,only_modules=_UpperCAmelCase )
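# Self-contained illustration of the doctest machinery driven above:
# DocTestSuite collects examples embedded in docstrings, so a module member
# like this one would contribute a single passing test.
def _documented_add(a: int, b: int) -> int:
"""Return a + b.
>>> _documented_add(2, 3)
5
"""
return a + b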
| 89 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2'''])
parser.add_argument('''--model_name''', default='''roberta-large''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
__lowerCAmelCase = parser.parse_args()
if args.model_type == "roberta":
__lowerCAmelCase = RobertaForMaskedLM.from_pretrained(args.model_name)
__lowerCAmelCase = '''roberta'''
elif args.model_type == "gpt2":
__lowerCAmelCase = GPT2LMHeadModel.from_pretrained(args.model_name)
__lowerCAmelCase = '''transformer'''
__lowerCAmelCase = model.state_dict()
__lowerCAmelCase = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
__lowerCAmelCase = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
__lowerCAmelCase = f"""{prefix}.embeddings.{w}.weight"""
__lowerCAmelCase = state_dict[param_name]
for w in ["weight", "bias"]:
__lowerCAmelCase = f"""{prefix}.embeddings.LayerNorm.{w}"""
__lowerCAmelCase = state_dict[param_name]
# Transformer Blocks #
__lowerCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
__lowerCAmelCase = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
__lowerCAmelCase = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[f"""lm_head.dense.{w}"""]
__lowerCAmelCase = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
__lowerCAmelCase = state_dict[f"""{prefix}.ln_f.{w}"""]
__lowerCAmelCase = state_dict['''lm_head.weight''']
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
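# Hedged sketch of the layer selection above: the six teacher layers
# [0, 2, 4, 7, 9, 11] are copied, in order, into student layers 0..5
# (std_idx plays the student index in the loop).
_teacher_to_student = {t: s for s, t in enumerate([0, 2, 4, 7, 9, 11])}
assert _teacher_to_student[7] == 3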
| 89 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
__lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[Any]:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_a : List[Any] = model_type_to_module_name(lowerCAmelCase_ )
_a : Optional[Any] = importlib.import_module(f""".{module_name}""" , 'transformers.models' )
try:
return getattr(lowerCAmelCase_ , lowerCAmelCase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowerCAmelCase_ , '__name__' , lowerCAmelCase_ ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_a : Dict = importlib.import_module('transformers' )
if hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
return getattr(lowerCAmelCase_ , lowerCAmelCase_ )
return None
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , **lowerCAmelCase_ , ) -> Tuple:
_a : List[str] = get_file_from_repo(
lowerCAmelCase_ , lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(lowerCAmelCase_ , encoding='utf-8' ) as reader:
return json.load(lowerCAmelCase_ )
class __magic_name__ :
def __init__( self : List[str] ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(_UpperCAmelCase )
def __lowercase ( cls : Dict ,_UpperCAmelCase : Union[str, Any] ,**_UpperCAmelCase : Optional[Any] ):
_a : Any = kwargs.pop('config' ,_UpperCAmelCase )
_a : Dict = kwargs.pop('trust_remote_code' ,_UpperCAmelCase )
_a : Any = True
_a , _a : Tuple = ImageProcessingMixin.get_image_processor_dict(_UpperCAmelCase ,**_UpperCAmelCase )
_a : List[Any] = config_dict.get('image_processor_type' ,_UpperCAmelCase )
_a : int = None
if "AutoImageProcessor" in config_dict.get('auto_map' ,{} ):
_a : Any = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_a : List[Any] = config_dict.pop('feature_extractor_type' ,_UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
_a : Optional[int] = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ):
_a : List[Any] = config_dict['auto_map']['AutoFeatureExtractor']
_a : List[str] = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Dict = AutoConfig.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
# It could be in `config.image_processor_type``
_a : Optional[int] = getattr(_UpperCAmelCase ,'image_processor_type' ,_UpperCAmelCase )
if hasattr(_UpperCAmelCase ,'auto_map' ) and "AutoImageProcessor" in config.auto_map:
_a : Union[str, Any] = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
_a : Optional[int] = image_processor_class_from_name(_UpperCAmelCase )
_a : List[str] = image_processor_auto_map is not None
_a : Optional[int] = image_processor_class is not None or type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
_a : Optional[int] = resolve_trust_remote_code(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
if has_remote_code and trust_remote_code:
_a : Dict = get_class_from_dynamic_module(
_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase )
_a : int = kwargs.pop('code_revision' ,_UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
_a : Dict = IMAGE_PROCESSOR_MAPPING[type(_UpperCAmelCase )]
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def __lowercase ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Dict ):
IMAGE_PROCESSOR_MAPPING.register(_UpperCAmelCase ,_UpperCAmelCase )
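# Hedged usage sketch (the auto class above carries the obfuscated name
# __magic_name__; upstream it is AutoImageProcessor). Resolution order is:
# trusted remote code -> explicit image_processor_type -> IMAGE_PROCESSOR_MAPPING.
#
# processor = AutoImageProcessor.from_pretrained('google/vit-base-patch16-224')
# inputs = processor(images=pil_image, return_tensors='pt')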
| 89 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=_UpperCamelCase ):
lowerCAmelCase : str = ['note_seq']
def __init__( self : Tuple ,*_UpperCAmelCase : List[Any] ,**_UpperCAmelCase : str ):
requires_backends(self ,['note_seq'] )
@classmethod
def __lowercase ( cls : List[Any] ,*_UpperCAmelCase : str ,**_UpperCAmelCase : Optional[Any] ):
requires_backends(cls ,['note_seq'] )
@classmethod
def __lowercase ( cls : Union[str, Any] ,*_UpperCAmelCase : Dict ,**_UpperCAmelCase : Any ):
requires_backends(cls ,['note_seq'] )
| 89 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowerCAmelCase = None
__lowerCAmelCase = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowerCAmelCase = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class __magic_name__ :
lowerCAmelCase : bool = True
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "PIL.Image.Image"
lowerCAmelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase : str = field(default='Image' , init=_UpperCamelCase , repr=_UpperCamelCase )
def __call__( self : Union[str, Any] ):
return self.pa_type
def __lowercase ( self : Any ,_UpperCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Optional[Any] = np.array(_UpperCAmelCase )
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(_UpperCAmelCase ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_UpperCAmelCase )
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : dict ,_UpperCAmelCase : Optional[int]=None ):
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.' )
if token_per_repo_id is None:
_a : Dict = {}
_a , _a : str = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(_UpperCAmelCase ):
_a : Any = PIL.Image.open(_UpperCAmelCase )
else:
_a : List[Any] = path.split('::' )[-1]
try:
_a : str = string_to_dict(_UpperCAmelCase ,config.HUB_DATASETS_URL )['repo_id']
_a : Optional[Any] = token_per_repo_id.get(_UpperCAmelCase )
except ValueError:
_a : int = None
with xopen(_UpperCAmelCase ,'rb' ,use_auth_token=_UpperCAmelCase ) as f:
_a : Tuple = BytesIO(f.read() )
_a : Union[str, Any] = PIL.Image.open(bytes_ )
else:
_a : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __lowercase ( self : int ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def __lowercase ( self : str ,_UpperCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
_a : Union[str, Any] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
_a : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_a : List[str] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Any = pa.StructArray.from_arrays([storage, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
_a : Union[str, Any] = storage.field('bytes' )
else:
_a : Tuple = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
_a : Union[str, Any] = storage.field('path' )
else:
_a : Dict = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
_a : List[str] = pa.array(
[encode_np_array(np.array(_UpperCAmelCase ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
_a : int = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Optional[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase ,self.pa_type )
def __lowercase ( self : Dict ,_UpperCAmelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(_UpperCAmelCase : Tuple ):
with xopen(_UpperCAmelCase ,'rb' ) as f:
_a : int = f.read()
return bytes_
_a : Any = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
_a : Optional[Any] = pa.array(
[os.path.basename(_UpperCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] ,type=pa.string() ,)
_a : Dict = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase ,self.pa_type )
def __lowerCamelCase ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_a : Dict = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __lowerCamelCase ( lowerCAmelCase_ ) -> bytes:
_a : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
_a : Optional[Any] = image.format
else:
_a : str = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(lowerCAmelCase_ , format=lowerCAmelCase_ )
return buffer.getvalue()
def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
if hasattr(lowerCAmelCase_ , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
_a : List[Any] = array.dtype
_a : Optional[int] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
_a : Union[str, Any] = dtype.kind
_a : Union[str, Any] = dtype.itemsize
_a : List[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
_a : Optional[int] = np.dtype('|u1' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
_a : Union[str, Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
_a : str = dtype_byteorder + dtype_kind + str(lowerCAmelCase_ )
_a : List[Any] = np.dtype(lowerCAmelCase_ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
_a : Union[str, Any] = PIL.Image.fromarray(array.astype(lowerCAmelCase_ ) )
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def __lowerCamelCase ( lowerCAmelCase_ ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if objs:
_a , _a : Optional[Any] = first_non_null_value(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowerCAmelCase_ , np.ndarray ):
_a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
elif isinstance(lowerCAmelCase_ , PIL.Image.Image ):
_a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
else:
return objs
else:
return objs
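# Hedged check of the downcast search in encode_np_array above (written
# against the list name the functions reference): int64 is not a
# Pillow-compatible dtype, so halving the itemsize lands on int32; both byte
# orders are present, so this holds on any host.
assert np.dtype('int64') not in _VALID_IMAGE_ARRAY_DTPYES
assert np.dtype('int32') in _VALID_IMAGE_ARRAY_DTPYES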
| 89 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = '''▁'''
__lowerCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
__lowerCAmelCase = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
__lowerCAmelCase = {'''vinai/bartpho-syllable''': 1_024}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : str = VOCAB_FILES_NAMES
lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self : Optional[int] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int ,_UpperCAmelCase : List[str]="<s>" ,_UpperCAmelCase : str="</s>" ,_UpperCAmelCase : Union[str, Any]="</s>" ,_UpperCAmelCase : List[str]="<s>" ,_UpperCAmelCase : Tuple="<unk>" ,_UpperCAmelCase : Optional[int]="<pad>" ,_UpperCAmelCase : int="<mask>" ,_UpperCAmelCase : Optional[Dict[str, Any]] = None ,**_UpperCAmelCase : List[Any] ,):
# Mask token behave like a normal word, i.e. include the space before it
_a : Any = AddedToken(_UpperCAmelCase ,lstrip=_UpperCAmelCase ,rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else mask_token
_a : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase ,eos_token=_UpperCAmelCase ,unk_token=_UpperCAmelCase ,sep_token=_UpperCAmelCase ,cls_token=_UpperCAmelCase ,pad_token=_UpperCAmelCase ,mask_token=_UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_UpperCAmelCase ,)
_a : Any = vocab_file
_a : int = monolingual_vocab_file
_a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : int = {}
_a : Optional[Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_a : Any = cnt
cnt += 1
with open(_UpperCAmelCase ,'r' ,encoding='utf-8' ) as f:
for line in f.readlines():
_a : List[str] = line.strip().split()[0]
_a : List[str] = len(self.fairseq_tokens_to_ids )
if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_a : Any = len(self.fairseq_tokens_to_ids )
_a : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Optional[Any] ):
_a : Union[str, Any] = self.__dict__.copy()
_a : List[str] = None
_a : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple ,_UpperCAmelCase : Optional[int] ):
_a : List[str] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_a : Optional[int] = {}
_a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowercase ( self : List[str] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
_a : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ,_UpperCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase ,token_ids_a=_UpperCAmelCase ,already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ):
_a : Optional[int] = [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowercase ( self : Dict ):
return len(self.fairseq_ids_to_tokens )
def __lowercase ( self : Optional[Any] ):
_a : Any = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self : List[Any] ,_UpperCAmelCase : str ):
return self.sp_model.encode(_UpperCAmelCase ,out_type=_UpperCAmelCase )
def __lowercase ( self : List[str] ,_UpperCAmelCase : List[str] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __lowercase ( self : List[Any] ,_UpperCAmelCase : Dict ):
return self.fairseq_ids_to_tokens[index]
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Union[str, Any] ):
_a : str = ''.join(_UpperCAmelCase ).replace(_UpperCAmelCase ,' ' ).strip()
return out_string
def __lowercase ( self : str ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : List[Any] = os.path.join(
_UpperCAmelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_a : int = os.path.join(
_UpperCAmelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] ,)
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase ,'wb' ) as fi:
_a : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file ,_UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_UpperCAmelCase ,'w' ,encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(_UpperCAmelCase )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
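# Hedged sketch of the special-token layout built above (BartPho follows the
# RoBERTa convention via build_inputs_with_special_tokens):
#
# <s> A </s> single sequence
# <s> A </s></s> B </s> sequence pair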
| 89 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str | Literal[False]:
_a : Optional[int] = list(lowerCAmelCase_ )
_a : Optional[Any] = list(lowerCAmelCase_ )
_a : Union[str, Any] = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count += 1
_a : Optional[int] = '_'
if count > 1:
return False
else:
return "".join(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ ) -> list[str]:
_a : Optional[int] = []
while True:
_a : Any = ['$'] * len(lowerCAmelCase_ )
_a : List[str] = []
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
_a : Optional[int] = compare_string(binary[i] , binary[j] )
if k is False:
_a : Optional[Any] = '*'
_a : Optional[Any] = '*'
temp.append('X' )
for i in range(len(lowerCAmelCase_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(lowerCAmelCase_ ) == 0:
return pi
_a : Any = list(set(lowerCAmelCase_ ) )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : int = []
for minterm in minterms:
_a : Optional[int] = ''
for _ in range(lowerCAmelCase_ ):
_a : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(lowerCAmelCase_ )
return temp
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> bool:
_a : int = list(lowerCAmelCase_ )
_a : Union[str, Any] = list(lowerCAmelCase_ )
_a : str = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : List[Any] = []
_a : Optional[Any] = [0] * len(lowerCAmelCase_ )
for i in range(len(chart[0] ) ):
_a : Union[str, Any] = 0
_a : int = -1
for j in range(len(lowerCAmelCase_ ) ):
if chart[j][i] == 1:
count += 1
_a : int = j
if count == 1:
_a : List[Any] = 1
for i in range(len(lowerCAmelCase_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(lowerCAmelCase_ ) ):
_a : Any = 0
temp.append(prime_implicants[i] )
while True:
_a : Union[str, Any] = 0
_a : List[Any] = -1
_a : str = 0
for i in range(len(lowerCAmelCase_ ) ):
_a : Union[str, Any] = chart[i].count(1 )
if count_n > max_n:
_a : Any = count_n
_a : int = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(lowerCAmelCase_ ) ):
_a : List[str] = 0
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[list[int]]:
_a : int = [[0 for x in range(len(lowerCAmelCase_ ) )] for x in range(len(lowerCAmelCase_ ) )]
for i in range(len(lowerCAmelCase_ ) ):
_a : str = prime_implicants[i].count('_' )
for j in range(len(lowerCAmelCase_ ) ):
if is_for_table(prime_implicants[i] , binary[j] , lowerCAmelCase_ ):
_a : Optional[Any] = 1
return chart
def __lowerCamelCase ( ) -> None:
_a : Optional[int] = int(input('Enter the no. of variables\n' ) )
_a : List[Any] = [
int(x )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_a : List[str] = decimal_to_binary(lowerCAmelCase_ , lowerCAmelCase_ )
_a : Dict = check(lowerCAmelCase_ )
print('Prime Implicants are:' )
print(lowerCAmelCase_ )
_a : List[Any] = prime_implicant_chart(lowerCAmelCase_ , lowerCAmelCase_ )
_a : int = selection(lowerCAmelCase_ , lowerCAmelCase_ )
print('Essential Prime Implicants are:' )
print(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
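

# Hedged demo (an assumption, not part of the original module): drives the same
# Quine-McCluskey pipeline as main(), but with hardcoded minterms instead of input().
def _demo_quine_mccluskey() -> None:
    minterms = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0]
    binary = decimal_to_binary(3, minterms)
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    print('Prime Implicants:', prime_implicants)
    print('Essential Prime Implicants:', selection(chart, prime_implicants))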
| 89 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])

        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
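

# Hedged usage sketch (an assumption, not part of the original test): batching
# several sentences with padding; `padding=True` / `return_tensors='np'` are
# standard tokenizer kwargs, and Flax models accept an `attention_mask`.
def _embed_batch(model, tokenizer, sentences):
    encoded = tokenizer(sentences, padding=True, return_tensors='np')
    outputs = model(encoded['input_ids'], attention_mask=encoded['attention_mask'])
    return outputs['last_hidden_state']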
| 89 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
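
# Hedged usage note (assumption): with the lazy module installed via sys.modules,
# `from transformers.models.cpmant import CpmAntConfig` resolves the symbol on
# first access without eagerly importing the torch-dependent modeling file.
# config = CpmAntConfig()  # triggers only configuration_cpmant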
| 89 | 1 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)


outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    'No of Comparisons for 100 elements selected from a standard normal distribution '
    'is :'
)
print(z)
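

# Hedged sanity check (an assumption, not part of the original script): the sort
# happens in place and the returned value is the comparison count.
def _check_sorted(n=50):
    arr = np.random.normal(0, 1, n)
    comparisons = _in_place_quick_sort(arr, 0, len(arr) - 1)
    assert all(arr[i] <= arr[i + 1] for i in range(len(arr) - 1))
    return comparisons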
| 89 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    # The original name of this no-op override was lost in this copy; kept as a
    # placeholder so the class structure matches the source file.
    def test_placeholder(self):
        pass
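
    # Hedged extra check (an assumption, not in the original file): both tokenizers
    # built from the same temp vocab should agree; BertTokenizerFast-style classes
    # accept a vocab_file path directly.
    def test_slow_fast_agree(self):
        slow = LayoutLMTokenizer(self.vocab_file)
        fast = LayoutLMTokenizerFast(self.vocab_file)
        text = 'UNwant\u00E9d,running'
        self.assertEqual(slow.tokenize(text), fast.tokenize(text))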
| 89 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data; property names match the model's input names."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = 'train'
    dev = 'dev'
    test = 'test'


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
@staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token='[CLS]',
        cls_token_segment_id=1,
        sep_token='[SEP]',
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info('Writing example %d of %d', ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info('*** Example ***')
                logger.info('guid: %s', example.guid)
                logger.info('tokens: %s', ' '.join([str(x) for x in tokens]))
                logger.info('input_ids: %s', ' '.join([str(x) for x in input_ids]))
                logger.info('input_mask: %s', ' '.join([str(x) for x in input_mask]))
                logger.info('segment_ids: %s', ' '.join([str(x) for x in segment_ids]))
                logger.info('label_ids: %s', ' '.join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids))
        return features
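

# Hedged usage sketch (an assumption, not from the original module): building
# features for a single example, given any pretrained BERT-style tokenizer.
def _demo_convert(tokenizer):
    examples = [InputExample(guid='dev-1', words=['Hugging', 'Face'], labels=['B-ORG', 'I-ORG'])]
    features = TokenClassificationTask.convert_examples_to_features(
        examples, ['O', 'B-ORG', 'I-ORG'], 16, tokenizer,
        cls_token=tokenizer.cls_token, sep_token=tokenizer.sep_token, pad_token=tokenizer.pad_token_id)
    return features[0].input_ids  # padded to length 16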
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, 'cached_{}_{}_{}'.format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),)
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"""Loading features from cached file {cached_features_file}""")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"""Creating features from dataset file at {data_dir}""")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,)
                    logger.info(f"""Saving features into cached file {cached_features_file}""")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,)
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32}, tf.int64), (
                        {'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),)
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32}, tf.int64), (
                        {
                            'input_ids': tf.TensorShape([None]),
                            'attention_mask': tf.TensorShape([None]),
                            'token_type_ids': tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),)
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset
def __len__( self : Optional[Any] ):
return len(self.features )
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
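
# Hedged usage sketch (assumption, not from the original module): consuming the
# generator-backed TF dataset built above.
# tf_dataset = TFTokenClassificationDataset(task, data_dir, tokenizer, labels, 'bert', 128)
# for batch_features, batch_label_ids in tf_dataset.get_dataset().batch(8).take(1):
#     print(batch_features['input_ids'].shape)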
| 89 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/conditional-detr-resnet-50': (
        'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = 'conditional_detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function='relu',
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type='sine',
        backbone='resnet50',
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
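

# Hedged usage sketch (an assumption, not from the original module): exercising
# the attribute_map/properties and to_dict serialization defined above.
def _demo_conditional_detr_config():
    config = ConditionalDetrConfig(num_queries=100)
    assert config.hidden_size == config.d_model == 256
    assert config.num_attention_heads == config.encoder_attention_heads == 8
    return config.to_dict()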
| 89 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
    },
    'tokenizer_file': {
        'unc-nlp/lxmert-base-uncased': (
            'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'unc-nlp/lxmert-base-uncased': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token='[UNK]',
        sep_token='[SEP]',
        pad_token='[PAD]',
        cls_token='[CLS]',
        mask_token='[MASK]',
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
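

# Hedged demo (an assumption, not part of the original module): pair inputs come
# out as [CLS] A [SEP] B [SEP] with token_type_ids of 0s for A and 1s for B.
def _demo_pair_layout(tokenizer):
    ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('what is it?'))
    ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('a cat'))
    built = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
    types = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
    assert len(built) == len(types)
    return built, types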
| 89 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
def __lowercase ( self : Dict ):
_a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Union[str, Any] = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.num_labels )
_a : str = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Any ):
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[Any] = ConvNextVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Any = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ):
_a : List[Any] = ConvNextVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase ,labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[int] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Dict = model(_UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_a : Tuple = None
_a : List[Any] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __lowercase ( self : Optional[Any] ):
_a : Any = self.prepare_config_and_inputs()
_a , _a , _a : Union[str, Any] = config_and_inputs
_a : Any = {'pixel_values': pixel_values}
return config, inputs_dict
def __lowercase ( self : str ):
_a : Tuple = self.prepare_config_and_inputs()
_a , _a , _a : Tuple = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase : str = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : int = False
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[str] = False
lowerCAmelCase : Optional[int] = False
def __lowercase ( self : List[Any] ):
_a : str = ConvNextVaModelTester(self )
_a : Tuple = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 )
def __lowercase ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : str ):
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __lowercase ( self : List[Any] ):
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __lowercase ( self : Any ):
pass
def __lowercase ( self : List[str] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Any = True
if model_class.__name__ in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]:
continue
_a : Optional[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_a : str = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : Optional[int] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : str ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Optional[int] = False
_a : Tuple = True
if (
model_class.__name__
in [*get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
_a : Tuple = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
_a : Any = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : List[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : List[Any] ):
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = model_class(_UpperCAmelCase )
_a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Dict = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : Any ):
def check_hidden_states_output(_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ):
_a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : str = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Optional[Any] = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : List[Any] ):
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def __lowercase ( self : int ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = ConvNextVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224').to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
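
# Hedged note (assumption): @slow tests are skipped unless RUN_SLOW=1 is set,
# e.g. `RUN_SLOW=1 python -m pytest -k ConvNextVaModelIntegrationTest <this file>`.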
| 89 | 1 |
import os
def solution() -> int:
    """Returns the total of all the name scores in the file."""
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
    names = names.replace('"', '').split(',')

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
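
# Worked example from the problem statement: COLIN is worth
# 3 + 15 + 12 + 9 + 14 = 53 and is the 938th name once sorted,
# so it contributes 938 * 53 = 49714 to the total.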
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lilt'] = [
        'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LiltForQuestionAnswering',
        'LiltForSequenceClassification',
        'LiltForTokenClassification',
        'LiltModel',
        'LiltPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 89 | 0 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
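

# Hedged usage sketch (assumption: the base Formatter accepts features=None and
# wires up the extractors/decoders used above).
def _demo_torch_format():
    table = pa.table({'x': [[1, 2], [3, 4]], 'y': [0.5, 1.5]})
    batch = TorchFormatter().format_batch(table)
    return batch['x'].dtype, batch['y'].dtype  # expected: torch.int64, torch.float32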
| 1 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 89 | 0 |
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Simple constructor that receives a key or uses default key = 0."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypts 'content' with 'key'; returns the result as a list of chars."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Decrypts 'content' with 'key'; returns the result as a list of chars."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """Encrypts 'content' with 'key'; returns the result as a string."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Decrypts 'content' with 'key'; returns the result as a string."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypts 'file' line by line into 'encrypt.out'; returns True on success."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open('encrypt.out', 'w+') as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypts 'file' line by line into 'decrypt.out'; returns True on success."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open('decrypt.out', 'w+') as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 2 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return '.'.join(path.split('.')[n_shave_prefix_segments:])
    else:
        return '.'.join(path.split('.')[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0', 'norm1')
        new_item = new_item.replace('in_layers.2', 'conv1')
        new_item = new_item.replace('out_layers.0', 'norm2')
        new_item = new_item.replace('out_layers.3', 'conv2')
        new_item = new_item.replace('emb_layers.1', 'time_emb_proj')
        new_item = new_item.replace('skip_connection', 'conv_shortcut')

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({'old': old_item, 'new': new_item})
    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight', 'group_norm.weight')
        new_item = new_item.replace('norm.bias', 'group_norm.bias')
        new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({'old': old_item, 'new': new_item})
    return mapping


def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map['query']] = query.reshape(target_shape)
            checkpoint[path_map['key']] = key.reshape(target_shape)
            checkpoint[path_map['value']] = value.reshape(target_shape)

    for path in paths:
        new_path = path['new']

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0')
        new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0')
        new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1')

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'], replacement['new'])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
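

# Hedged illustration (assumption): one concrete rename produced by the helpers above.
# renew_resnet_paths(['input_blocks.1.0.in_layers.0.weight'])
#   -> [{'old': 'input_blocks.1.0.in_layers.0.weight',
#        'new': 'input_blocks.1.0.norm1.weight'}]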
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}

    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']

    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']

    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'input_blocks' in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'middle_block' in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'output_blocks' in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config['num_res_blocks'] + 1)
        layer_in_block_id = (i - 1) % (config['num_res_blocks'] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {'old': f"input_blocks.{i}.0", 'new': f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config)

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                'old': f"input_blocks.{i}.1",
                'new': f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config,)

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        'middle_block.1.qkv.bias': {
            'key': 'mid_block.attentions.0.key.bias',
            'query': 'mid_block.attentions.0.query.bias',
            'value': 'mid_block.attentions.0.value.bias',
        },
        'middle_block.1.qkv.weight': {
            'key': 'mid_block.attentions.0.key.weight',
            'query': 'mid_block.attentions.0.query.weight',
            'value': 'mid_block.attentions.0.value.weight',
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config)

    for i in range(num_output_blocks):
        block_id = i // (config['num_res_blocks'] + 1)
        layer_in_block_id = i % (config['num_res_blocks'] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split('.')[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {'old': f"output_blocks.{i}.0", 'new': f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(['conv.weight', 'conv.bias'])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    'old': f"output_blocks.{i}.1",
                    'new': f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any('qkv' in key for key in attentions) else None, config=config,)
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = '.'.join(['output_blocks', str(i), path['old']])
                new_path = '.'.join(['up_blocks', str(block_id), 'resnets', str(layer_in_block_id), path['new']])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 89 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model , hf_model , is_finetuned ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
else:
for key, mapped_key in MAPPING.items():
                mapped_key = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
            unused_weights.append(name )
logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    '''simple docstring'''
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
        unused_weights.append(full_name )
def convert_config(model , is_finetuned ):
    '''simple docstring'''
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = '''gelu'''
    config.feat_extract_norm = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1E-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = '''Wav2Vec2FeatureExtractor'''
    config.tokenizer_class = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    '''simple docstring'''
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
if is_finetuned:
if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 3 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload , sampling_rate ) -> np.array:
    ar = f"""{sampling_rate}"""
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def ffmpeg_microphone(sampling_rate , chunk_length_s , format_for_conversion = "f32le" , ):
    ar = f"""{sampling_rate}"""
    ac = '1'
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    system = platform.system()
    if system == "Linux":
        format_ = 'alsa'
        input_ = 'default'
    elif system == "Darwin":
        format_ = 'avfoundation'
        input_ = ':0'
    elif system == "Windows":
        format_ = 'dshow'
        input_ = 'default'
    ffmpeg_command = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
for item in iterator:
yield item
def ffmpeg_microphone_live(sampling_rate , chunk_length_s , stream_chunk_s = None , stride_length_s = None , format_for_conversion = "f32le" , ):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
# Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'] , dtype=dtype )
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter(iterator , chunk_len , stride , stream = False ):
    acc = b""
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
    _stride_left = 0
for raw in iterator:
acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
def _ffmpeg_stream(ffmpeg_command , buflen ):
    bufsize = 2**24 # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
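# Minimal sketch of how the decode helper above is used (assumes a local
# "sample.wav" and an ffmpeg binary on PATH; not part of the original module):
#
#   with open("sample.wav", "rb") as f:
#       audio = ffmpeg_read(f.read(), sampling_rate=16000)
#   print(audio.shape, audio.dtype)  # 1-D float32 PCM at 16 kHz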
| 89 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput ):
    latents: torch.FloatTensor
class VQModel(ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (6_4,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 3 , sample_size: int = 3_2 , num_vq_embeddings: int = 2_5_6 , norm_num_groups: int = 3_2 , vq_embed_dim: Optional[int] = None , scaling_factor: float = 0.18_215 , norm_type: str = "group" , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
@apply_forward_hook
    def encode(self , x: torch.FloatTensor , return_dict: bool = True ) -> VQEncoderOutput:
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
@apply_forward_hook
    def decode(self , h: torch.FloatTensor , force_not_quantize: bool = False , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == 'spatial' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward(self , sample: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
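# Minimal round-trip sketch for the model above (input shape chosen for
# illustration; real weights would come from VQModel.from_pretrained):
#
#   model = VQModel(block_out_channels=(64,), num_vq_embeddings=256)
#   out = model(torch.randn(1, 3, 32, 32)).sample  # same spatial size back
#                                                  # for this tiny config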
| 4 |
'''simple docstring'''
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph , start , goal ) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
# in case there's no path between the 2 nodes
return []
def bfs_shortest_path_distance(graph , start , target ) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
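    # A second worked example on the same graph (added for illustration):
    print(bfs_shortest_path(demo_graph, '''A''', '''F''')) # returns ['A', 'C', 'F']
    print(bfs_shortest_path_distance(demo_graph, '''A''', '''F''')) # returns 2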
| 89 | 0 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_0 , initializer_range=0.02 , use_labels=True , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
def __A (self ) -> int:
_lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase =None
if self.use_input_mask:
_lowercase =random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
_lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase =self.get_config()
return config, input_ids, input_mask, token_labels
def __A (self ) -> Any:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
def __A (self ) -> Dict:
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) =self.prepare_config_and_inputs()
_lowercase =True
_lowercase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowercase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase , ) -> Tuple:
_lowercase =BertGenerationEncoder(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase )
_lowercase =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase , ) -> List[str]:
_lowercase =True
_lowercase =BertGenerationEncoder(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , )
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase , ) -> Dict:
_lowercase =True
_lowercase =True
_lowercase =BertGenerationDecoder(config=UpperCAmelCase ).to(UpperCAmelCase ).eval()
# first forward pass
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase , )
_lowercase =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowercase =ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowercase =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_lowercase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowercase =torch.cat([input_mask, next_mask] , dim=-1 )
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['''hidden_states'''][0]
_lowercase =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['''hidden_states'''][0]
# select random slice
_lowercase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowercase =output_from_no_past[:, -3:, random_slice_idx].detach()
_lowercase =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase , ) -> Optional[int]:
_lowercase =BertGenerationDecoder(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A (self ) -> Tuple:
_lowercase , _lowercase , _lowercase , _lowercase =self.prepare_config_and_inputs()
_lowercase ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp(self ):
        self.model_tester = BertGenerationEncoderTester(self )
        self.config_tester = ConfigTester(self , config_class=BertGenerationConfig , hidden_size=3_7 )
def __A (self ) -> List[str]:
self.config_tester.run_common_tests()
def __A (self ) -> Optional[int]:
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A (self ) -> List[Any]:
_lowercase , _lowercase , _lowercase , _lowercase =self.model_tester.prepare_config_and_inputs()
_lowercase ='''bert'''
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __A (self ) -> Union[str, Any]:
_lowercase =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase )
def __A (self ) -> List[str]:
_lowercase =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase )
def __A (self ) -> int:
# This regression test was failing with PyTorch < 1.3
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) =self.model_tester.prepare_config_and_inputs_for_decoder()
_lowercase =None
self.model_tester.create_and_check_model_as_decoder(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , )
def __A (self ) -> str:
_lowercase =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase )
@slow
def __A (self ) -> Union[str, Any]:
_lowercase =BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
@slow
def __A (self ) -> List[str]:
_lowercase =BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
_lowercase =torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
_lowercase =model(UpperCAmelCase )[0]
_lowercase =torch.Size([1, 8, 1_0_2_4] )
self.assertEqual(output.shape , UpperCAmelCase )
_lowercase =torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
@slow
def __A (self ) -> Dict:
_lowercase =BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
_lowercase =torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
_lowercase =model(UpperCAmelCase )[0]
_lowercase =torch.Size([1, 8, 5_0_3_5_8] )
self.assertEqual(output.shape , UpperCAmelCase )
_lowercase =torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
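# Hypothetical command to run only this file's tests (the path is an
# assumption about the repository layout, not taken from the tests above):
#   python -m pytest tests/models/bert_generation/test_modeling_bert_generation.py -k "decoder"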
| 5 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swin'''] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_swin'''] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
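# Sketch of the behaviour this buys (module and class names follow the
# import structure above):
#
#   import transformers.models.swin as swin  # cheap: no torch/TF import yet
#   model_cls = swin.SwinModel               # first attribute access triggers
#                                            # the real modeling_swin import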
| 89 | 0 |
import random
class Onepad:
    @staticmethod
    def encrypt( text ) -> tuple[list[int], list[int]]:
        '''simple docstring'''
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 , 300 )
            c = (i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt( cipher , key ) -> str:
        '''simple docstring'''
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
    c , k = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k)) | 6 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname ,legacy_format=False )
        self.tokenizer = tokenizer
def __lowercase ( self : Tuple ):
_a : Optional[Any] = '<pad>'
_a : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) ,_UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) ,_UpperCAmelCase )
def __lowercase ( self : str ):
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'<mask>' )
self.assertEqual(len(_UpperCAmelCase ) ,101122 )
def __lowercase ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size ,101122 )
@require_torch
def __lowercase ( self : Dict ):
_a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : Dict = [0, 57, 3018, 70307, 91, 2]
_a : Dict = self.tokenizer(
_UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual((2, 6) ,batch.input_ids.shape )
self.assertEqual((2, 6) ,batch.attention_mask.shape )
_a : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
_a : str = self.get_tokenizer()
_a : List[str] = self.get_rust_tokenizer()
_a : Dict = 'I was born in 92000, and this is falsé.'
_a : List[Any] = tokenizer.tokenize(_UpperCAmelCase )
_a : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Union[str, Any] = self.get_rust_tokenizer()
_a : Any = tokenizer.encode(_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
@slow
def __lowercase ( self : Optional[int] ):
# fmt: off
_a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_a : Optional[Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
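# Quick sanity check outside the test harness (the model id mirrors the one
# exercised above; requires network access or a cached copy):
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained('moussaKam/mbarthez')
#   tok.tokenize('I was born in 92000, and this is falsé.')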
| 89 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def snake_case__ ( self : List[str] )-> List[Any]:
'''simple docstring'''
super().setUp()
A__ = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
A__ = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def snake_case__ ( self : str,lowercase_ : Union[str, Any] )-> str:
'''simple docstring'''
A__ = 'こんにちは、世界。 \nこんばんは、世界。'
A__ = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def snake_case__ ( self : str,lowercase_ : Dict )-> Optional[int]:
'''simple docstring'''
A__ , A__ = self.get_input_output_texts(lowercase_ )
A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
A__ = tokenizer.decode(lowercase_,clean_up_tokenization_spaces=lowercase_ )
return text, ids
def snake_case__ ( self : int )-> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def snake_case__ ( self : Optional[Any] )-> Dict:
'''simple docstring'''
pass # TODO add if relevant
def snake_case__ ( self : int )-> Dict:
'''simple docstring'''
pass # TODO add if relevant
def snake_case__ ( self : Tuple )-> Optional[Any]:
'''simple docstring'''
A__ = self.tokenizer_class(self.vocab_file )
A__ = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(lowercase_,['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),[3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def snake_case__ ( self : List[Any] )-> int:
'''simple docstring'''
A__ = self.tokenizer_class(self.vocab_file,word_tokenizer_type='mecab' )
self.assertIsNotNone(lowercase_ )
A__ = 'こんにちは、世界。\nこんばんは、世界。'
A__ = tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_,['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),[3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
A__ = os.path.join(self.tmpdirname,'tokenizer.bin' )
with open(lowercase_,'wb' ) as handle:
pickle.dump(lowercase_,lowercase_ )
with open(lowercase_,'rb' ) as handle:
A__ = pickle.load(lowercase_ )
A__ = tokenizer_new.tokenize(lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
def snake_case__ ( self : int )-> str:
'''simple docstring'''
A__ = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],)
def snake_case__ ( self : Tuple )-> List[str]:
'''simple docstring'''
try:
A__ = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],)
def snake_case__ ( self : Tuple )-> int:
'''simple docstring'''
try:
A__ = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],)
def snake_case__ ( self : Optional[Any] )-> List[str]:
'''simple docstring'''
A__ = MecabTokenizer(do_lower_case=lowercase_,mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],)
def snake_case__ ( self : str )-> int:
'''simple docstring'''
try:
A__ = MecabTokenizer(
do_lower_case=lowercase_,normalize_text=lowercase_,mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'],)
def snake_case__ ( self : Dict )-> int:
'''simple docstring'''
A__ = MecabTokenizer(normalize_text=lowercase_,mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'],)
@require_sudachi
def snake_case__ ( self : List[str] )-> Tuple:
'''simple docstring'''
A__ = self.tokenizer_class(self.vocab_file,word_tokenizer_type='sudachi' )
self.assertIsNotNone(lowercase_ )
A__ = 'こんにちは、世界。\nこんばんは、世界。'
A__ = tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_,['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),[3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
A__ = os.path.join(self.tmpdirname,'tokenizer.bin' )
with open(lowercase_,'wb' ) as handle:
pickle.dump(lowercase_,lowercase_ )
with open(lowercase_,'rb' ) as handle:
A__ = pickle.load(lowercase_ )
A__ = tokenizer_new.tokenize(lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
@require_sudachi
def snake_case__ ( self : List[str] )-> str:
'''simple docstring'''
A__ = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),[' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '],)
@require_sudachi
def snake_case__ ( self : int )-> Any:
'''simple docstring'''
A__ = SudachiTokenizer(sudachi_dict_type='core',sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ),['外国', '人', '参政', '権'] )
@require_sudachi
def snake_case__ ( self : List[str] )-> Union[str, Any]:
'''simple docstring'''
A__ = SudachiTokenizer(sudachi_dict_type='core',sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ),['外国人', '参政権'] )
@require_sudachi
def snake_case__ ( self : str )-> int:
'''simple docstring'''
A__ = SudachiTokenizer(sudachi_dict_type='core',sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ),['外国人参政権'] )
@require_sudachi
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
A__ = SudachiTokenizer(do_lower_case=lowercase_,sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),[' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '],)
@require_sudachi
def snake_case__ ( self : int )-> int:
'''simple docstring'''
A__ = SudachiTokenizer(normalize_text=lowercase_,sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),[' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '],)
@require_sudachi
def snake_case__ ( self : Optional[Any] )-> List[str]:
'''simple docstring'''
A__ = SudachiTokenizer(trim_whitespace=lowercase_,sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],)
@require_jumanpp
def snake_case__ ( self : int )-> Tuple:
'''simple docstring'''
A__ = self.tokenizer_class(self.vocab_file,word_tokenizer_type='jumanpp' )
self.assertIsNotNone(lowercase_ )
A__ = 'こんにちは、世界。\nこんばんは、世界。'
A__ = tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_,['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),[3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
A__ = os.path.join(self.tmpdirname,'tokenizer.bin' )
with open(lowercase_,'wb' ) as handle:
pickle.dump(lowercase_,lowercase_ )
with open(lowercase_,'rb' ) as handle:
A__ = pickle.load(lowercase_ )
A__ = tokenizer_new.tokenize(lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
@require_jumanpp
def snake_case__ ( self : Dict )-> str:
'''simple docstring'''
A__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'],)
@require_jumanpp
def snake_case__ ( self : Any )-> Dict:
'''simple docstring'''
A__ = JumanppTokenizer(do_lower_case=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'],)
@require_jumanpp
def snake_case__ ( self : Union[str, Any] )-> Dict:
'''simple docstring'''
A__ = JumanppTokenizer(normalize_text=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'],)
@require_jumanpp
def snake_case__ ( self : str )-> Optional[Any]:
'''simple docstring'''
A__ = JumanppTokenizer(trim_whitespace=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ),['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'],)
@require_jumanpp
def snake_case__ ( self : Union[str, Any] )-> str:
'''simple docstring'''
A__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ),['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'],)
def snake_case__ ( self : Tuple )-> List[str]:
'''simple docstring'''
A__ = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
A__ = {}
for i, token in enumerate(lowercase_ ):
A__ = i
A__ = WordpieceTokenizer(vocab=lowercase_,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ),[] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ),['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ),['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ),['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def snake_case__ ( self : str )-> int:
'''simple docstring'''
A__ = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
A__ = tokenizer.subword_tokenizer
A__ = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(lowercase_,['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
A__ = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(lowercase_,['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def snake_case__ ( self : str )-> Dict:
'''simple docstring'''
A__ = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
A__ = tokenizer.encode('ありがとう。',add_special_tokens=lowercase_ )
A__ = tokenizer.encode('どういたしまして。',add_special_tokens=lowercase_ )
A__ = tokenizer.build_inputs_with_special_tokens(lowercase_ )
A__ = tokenizer.build_inputs_with_special_tokens(lowercase_,lowercase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def snake_case__ ( self : Any )-> Tuple:
'''simple docstring'''
super().setUp()
A__ = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
A__ = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def snake_case__ ( self : Any,**lowercase_ : Any )-> Any:
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname,subword_tokenizer_type='character',**lowercase_ )
def snake_case__ ( self : Any,lowercase_ : int )-> Union[str, Any]:
'''simple docstring'''
A__ = 'こんにちは、世界。 \nこんばんは、世界。'
A__ = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def snake_case__ ( self : Any )-> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def snake_case__ ( self : Any )-> Optional[int]:
'''simple docstring'''
pass # TODO add if relevant
def snake_case__ ( self : Any )-> Union[str, Any]:
'''simple docstring'''
pass # TODO add if relevant
def snake_case__ ( self : str )-> Tuple:
'''simple docstring'''
A__ = self.tokenizer_class(self.vocab_file,subword_tokenizer_type='character' )
A__ = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
lowercase_,['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ),[3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
A__ = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
A__ = {}
for i, token in enumerate(lowercase_ ):
A__ = i
A__ = CharacterTokenizer(vocab=lowercase_,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ),[] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ),['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ),['こ', 'ん', 'に', 'ち', '[UNK]'] )
def snake_case__ ( self : int )-> Optional[Any]:
'''simple docstring'''
A__ = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
A__ = tokenizer.encode('ありがとう。',add_special_tokens=lowercase_ )
A__ = tokenizer.encode('どういたしまして。',add_special_tokens=lowercase_ )
A__ = tokenizer.build_inputs_with_special_tokens(lowercase_ )
A__ = tokenizer.build_inputs_with_special_tokens(lowercase_,lowercase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Optional[Any] )-> Optional[Any]:
'''simple docstring'''
A__ = 'cl-tohoku/bert-base-japanese'
A__ = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_,lowercase_ )
class BertTokenizerMismatchTest( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Union[str, Any] )-> str:
'''simple docstring'''
A__ = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers',level='WARNING' ) as cm:
BertTokenizer.from_pretrained(lowercase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
A__ = 'bert-base-cased'
with self.assertLogs('transformers',level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(lowercase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
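# Quick manual check of the word + wordpiece pipeline tested above (requires
# the optional fugashi/ipadic dependencies; the model id is from the tests):
#   from transformers import BertJapaneseTokenizer
#   tok = BertJapaneseTokenizer.from_pretrained('cl-tohoku/bert-base-japanese')
#   tok.tokenize('こんにちは、世界。')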
| 7 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests( TestCasePlus ):
@require_torch
def __lowercase ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Optional[int] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : List[str] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Tuple = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : List[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : Any ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Dict = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : Optional[int] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : int = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : str = self.get_env()
_a : Optional[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_a : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_a : str = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_a : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Dict = self.get_env()
_a : int = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
_a : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : int ):
_a : Optional[Any] = '\nfrom transformers import pipeline\n '
_a : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_a : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_a : List[Any] = self.get_env()
_a : Dict = '1'
_a : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_a : str = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def __lowercase ( self : int ):
_a : Optional[int] = '\nfrom transformers import AutoModel\n '
_a : List[Any] = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_a : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Tuple = self.get_env()
_a : List[str] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : Optional[Any] = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
| 89 | 0 |
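The tests above emulate loss of connectivity by monkey-patching `socket.socket` inside a fresh interpreter, before `transformers` (or anything else) can open a connection. A minimal standalone sketch of that pattern, with an illustrative `urllib` probe standing in for the model-loading code (the URL and printed marker are mine, not from the test suite):

import subprocess
import sys

# Patch that must run before any other import opens a socket.
mock = (
    "import socket\n"
    "def offline_socket(*args, **kwargs): raise socket.error('Faking flaky internet')\n"
    "socket.socket = offline_socket\n"
)

# Illustrative payload: any network call attempted after the patch should fail.
run = (
    "import urllib.request\n"
    "try:\n"
    "    urllib.request.urlopen('http://example.com')\n"
    "except OSError:\n"
    "    print('offline as expected')\n"
)

result = subprocess.run([sys.executable, '-c', mock + run], capture_output=True)
assert 'offline as expected' in result.stdout.decode()

Running the snippets in a subprocess matters because, as the comments above note, TRANSFORMERS_OFFLINE and the socket patch only take effect if applied before the library is imported.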
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = "fnet"
def __init__( self : List[Any] , _UpperCamelCase : Tuple=3_2_0_0_0 , _UpperCamelCase : Optional[Any]=7_6_8 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : str=3_0_7_2 , _UpperCamelCase : Optional[int]="gelu_new" , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Union[str, Any]=5_1_2 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Tuple=0.02 , _UpperCamelCase : Tuple=1e-12 , _UpperCamelCase : List[str]=False , _UpperCamelCase : Union[str, Any]=5_1_2 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=1 , _UpperCamelCase : List[Any]=2 , **_UpperCamelCase : List[str] , ) ->Union[str, Any]:
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = use_tpu_fourier_optimizations
snake_case_ = tpu_short_seq_length | 8 |
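The class above is a plain `PretrainedConfig` subclass: the constructor records hyperparameters and forwards the special token ids to the base class. A short usage sketch, assuming the upstream class name `FNetConfig` from `transformers` (this dump renames it):

from transformers import FNetConfig

# Defaults mirror the signature above; any field can be overridden.
config = FNetConfig(vocab_size=32000, num_hidden_layers=12)
print(config.model_type)   # 'fnet'
print(config.hidden_size)  # 768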
'''simple docstring'''
def __lowerCamelCase ( ) -> Tuple:
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def __lowerCamelCase ( lowerCAmelCase_ ) -> List[Any]:
_a : Any = 1
_a : Tuple = 2
while i * i <= n:
_a : Tuple = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def __lowerCamelCase ( ) -> str:
return next(i for i in triangle_number_generator() if count_divisors(lowerCAmelCase_ ) > 500 )
if __name__ == "__main__":
print(solution())
| 89 | 0 |
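The divisor-counting helper above relies on the identity d(n) = product of (a_i + 1) over the prime factorisation n = product of p_i ** a_i; the trailing `divisors_count *= 2` accounts for one leftover prime factor larger than sqrt(n). A standalone check of that arithmetic:

def count_divisors(n: int) -> int:
    # d(n) = product of (multiplicity + 1) over the prime factors of n
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2  # one remaining prime factor > sqrt(n)
    return divisors_count

assert count_divisors(28) == 6           # 28 = 2^2 * 7 -> (2+1) * (1+1)
assert count_divisors(76576500) > 500    # the Project Euler 12 answer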
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={'vocab_file': 'vocab.txt'}
__lowerCAmelCase : List[Any] ={
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
__lowerCAmelCase : Any ={
'openbmb/cpm-ant-10b': 1_0_2_4,
}
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = collections.OrderedDict()
with open(lowercase__ , '''r''' , encoding='''utf-8''' ) as reader:
__SCREAMING_SNAKE_CASE : Optional[int] = reader.readlines()
for index, token in enumerate(lowercase__ ):
__SCREAMING_SNAKE_CASE : int = token.rstrip('''\n''' )
__SCREAMING_SNAKE_CASE : Tuple = index
return vocab
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any]="<unk>" , lowerCAmelCase__ :str=200 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = vocab
__SCREAMING_SNAKE_CASE : Union[str, Any] = unk_token
__SCREAMING_SNAKE_CASE : Optional[Any] = max_input_chars_per_word
def __magic_name__( self :str , lowerCAmelCase__ :Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > self.max_input_chars_per_word:
return [self.unk_token]
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : int = []
while start < len(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = None
while start < end:
__SCREAMING_SNAKE_CASE : List[str] = ''''''.join(chars[start:end] )
if substr in self.vocab:
__SCREAMING_SNAKE_CASE : Optional[int] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = end
return sub_tokens
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[Any] = ['''input_ids''', '''attention_mask''']
SCREAMING_SNAKE_CASE__ : Any = False
def __init__( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :int="<d>" , lowerCAmelCase__ :int="</d>" , lowerCAmelCase__ :Optional[Any]="<s>" , lowerCAmelCase__ :Optional[Any]="</s>" , lowerCAmelCase__ :Optional[int]="<pad>" , lowerCAmelCase__ :Dict="<unk>" , lowerCAmelCase__ :Any="</n>" , lowerCAmelCase__ :List[str]="</_>" , lowerCAmelCase__ :Optional[Any]="left" , **lowerCAmelCase__ :Tuple , ) -> Optional[Any]:
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=lowerCAmelCase__ , eod_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , line_token=lowerCAmelCase__ , space_token=lowerCAmelCase__ , padding_side=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Optional[int] = bod_token
__SCREAMING_SNAKE_CASE : List[str] = eod_token
__SCREAMING_SNAKE_CASE : Optional[int] = load_vocab(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = self.encoder[space_token]
__SCREAMING_SNAKE_CASE : str = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__SCREAMING_SNAKE_CASE : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase__ : x[1] ) )
__SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE : Any = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __magic_name__( self :List[Any] ) -> int:
return self.encoder[self.bod_token]
@property
def __magic_name__( self :int ) -> List[Any]:
return self.encoder[self.eod_token]
@property
def __magic_name__( self :List[Any] ) -> Union[str, Any]:
return self.encoder["\n"]
@property
def __magic_name__( self :Optional[int] ) -> int:
return len(self.encoder )
def __magic_name__( self :int ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__( self :str , lowerCAmelCase__ :List[str] ) -> Any:
__SCREAMING_SNAKE_CASE : int = []
for x in jieba.cut(lowerCAmelCase__ , cut_all=lowerCAmelCase__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCAmelCase__ ) )
return output_tokens
def __magic_name__( self :str , lowerCAmelCase__ :Tuple , **lowerCAmelCase__ :Optional[int] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Any = [i for i in token_ids if i >= 0]
__SCREAMING_SNAKE_CASE : int = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :int , lowerCAmelCase__ :Tuple ) -> List[str]:
return token in self.encoder
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[str] ) -> str:
return "".join(lowerCAmelCase__ )
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[Any] ) -> List[Any]:
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def __magic_name__( self :Any , lowerCAmelCase__ :Any ) -> Optional[Any]:
return self.decoder.get(lowerCAmelCase__ , self.unk_token )
def __magic_name__( self :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
if os.path.isdir(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
__SCREAMING_SNAKE_CASE : Optional[int] = 0
if " " in self.encoder:
__SCREAMING_SNAKE_CASE : List[str] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
__SCREAMING_SNAKE_CASE : Tuple = self.encoder['''\n''']
del self.encoder["\n"]
__SCREAMING_SNAKE_CASE : Optional[int] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase__ : x[1] ) )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
''' Please check that the vocabulary is not corrupted!''' )
__SCREAMING_SNAKE_CASE : Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :List[int] = None ) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __magic_name__( self :str , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None , lowerCAmelCase__ :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ ))
return [1] + ([0] * len(lowerCAmelCase__ ))
| 9 |
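The inner `WordpieceTokenizer` above performs greedy longest-match-first segmentation: at each position it takes the longest substring present in the vocabulary, emitting the unknown token when nothing matches. A self-contained sketch of that loop with a made-up toy vocabulary:

def longest_match_tokenize(text, vocab, unk='<unk>', max_chars=200):
    chars = list(text)
    if len(chars) > max_chars:
        return [unk]
    start, tokens = 0, []
    while start < len(chars):
        end, cur = len(chars), None
        while start < end:                     # shrink the candidate from the right
            candidate = ''.join(chars[start:end])
            if candidate in vocab:
                cur = candidate
                break
            end -= 1
        if cur is None:                        # nothing matched: emit <unk>, move on
            tokens.append(unk)
            start += 1
        else:
            tokens.append(cur)
            start = end
    return tokens

print(longest_match_tokenize('unhappy', {'un', 'unhap', 'happy', 'py'}))
# ['unhap', 'py'] -- greedy matching never backtracks to find ['un', 'happy']

The non-backtracking behaviour is the design trade-off: the loop stays simple and fast, at the cost of occasionally missing a segmentation a shortest-path tokenizer would find.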
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : Optional[int] ,_UpperCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,**_UpperCAmelCase : Dict ,):
super().__init__(features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Tuple = Sql(
cache_dir=_UpperCAmelCase ,features=_UpperCAmelCase ,sql=_UpperCAmelCase ,con=_UpperCAmelCase ,**_UpperCAmelCase ,)
def __lowercase ( self : Dict ):
_a : Optional[Any] = None
_a : Dict = None
_a : Dict = None
_a : Optional[int] = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase ,download_mode=_UpperCAmelCase ,verification_mode=_UpperCAmelCase ,base_path=_UpperCAmelCase ,)
# Build dataset for splits
_a : List[str] = self.builder.as_dataset(
split='train' ,verification_mode=_UpperCAmelCase ,in_memory=self.keep_in_memory )
return dataset
class __magic_name__ :
def __init__( self : Optional[int] ,_UpperCAmelCase : Dataset ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Dict ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_a : Dict = dataset
_a : List[Any] = name
_a : Tuple = con
_a : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_a : List[Any] = num_proc
_a : Tuple = to_sql_kwargs
def __lowercase ( self : List[Any] ):
_a : Tuple = self.to_sql_kwargs.pop('sql' ,_UpperCAmelCase )
_a : str = self.to_sql_kwargs.pop('con' ,_UpperCAmelCase )
_a : Optional[Any] = self.to_sql_kwargs.pop('index' ,_UpperCAmelCase )
_a : Any = self._write(index=_UpperCAmelCase ,**self.to_sql_kwargs )
return written
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Dict ):
_a , _a , _a : Any = args
_a : Tuple = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
_a : Dict = query_table(
table=self.dataset.data ,key=slice(_UpperCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
_a : Tuple = batch.to_pandas()
_a : Dict = df.to_sql(self.name ,self.con ,index=_UpperCAmelCase ,**_UpperCAmelCase )
return num_rows or len(_UpperCAmelCase )
def __lowercase ( self : int ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : List[Any] ):
_a : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_a , _a : List[Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,_UpperCAmelCase ,_UpperCAmelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += num_rows
return written
| 89 | 0 |
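These reader/writer classes back the public `Dataset.from_sql` and `Dataset.to_sql` entry points (available in recent `datasets` releases). A hedged usage sketch; the table and database names here are illustrative:

import sqlite3
from datasets import Dataset

ds = Dataset.from_dict({'text': ['a', 'b'], 'label': [0, 1]})

con = sqlite3.connect('example.db')
ds.to_sql('my_table', con)                          # batches rows through pandas DataFrame.to_sql

loaded = Dataset.from_sql('SELECT * FROM my_table', con)
print(loaded[0])                                    # {'text': 'a', 'label': 0}
con.close()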
from __future__ import annotations
def lowerCAmelCase_ ( __a , __a , __a ) -> tuple[float, list[float]]:
"""simple docstring"""
lowerCamelCase__: Any =list(range(len(__a ) ) )
lowerCamelCase__: Optional[Any] =[v / w for v, w in zip(__a , __a )]
index.sort(key=lambda i : ratio[i] , reverse=True )
lowerCamelCase__: float =0
lowerCamelCase__: list[float] =[0] * len(__a )
for i in index:
if weight[i] <= capacity:
lowerCamelCase__: Optional[int] =1
max_value += value[i]
capacity -= weight[i]
else:
lowerCamelCase__: Optional[int] =capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 |
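Since every parameter in the dump above was renamed to `__a`, here is a readable reimplementation of the same greedy fractional-knapsack algorithm (the function name is mine), together with the classic worked example:

def fractional_knapsack(value, weight, capacity):
    # Greedily take items in decreasing value/weight ratio; split the last one.
    index = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    max_value, fractions = 0.0, [0.0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * fractions[i]
            break
    return max_value, fractions

best, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], capacity=50)
assert best == 240.0                  # items 0 and 1 whole, two thirds of item 2
assert fractions == [1.0, 1.0, 20 / 30]

Greedy-by-ratio is provably optimal for the fractional variant; it fails for 0/1 knapsack, which needs dynamic programming instead.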
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> np.ndarray:
_a : Union[str, Any] = cva.getAffineTransform(lowerCAmelCase_ , lowerCAmelCase_ )
return cva.warpAffine(lowerCAmelCase_ , lowerCAmelCase_ , (rows, cols) )
if __name__ == "__main__":
# read original image
__lowerCAmelCase = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
__lowerCAmelCase = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__lowerCAmelCase , __lowerCAmelCase = gray_img.shape
# set different points to rotate image
__lowerCAmelCase = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
__lowerCAmelCase = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
__lowerCAmelCase = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
__lowerCAmelCase = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
__lowerCAmelCase = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__lowerCAmelCase = plt.figure(1)
__lowerCAmelCase = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 89 | 0 |
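`cva` is this dump's alias for `cv2`. `cv2.getAffineTransform` solves for the unique 2x3 matrix mapping three source points onto three destination points, and `cv2.warpAffine` applies it to the image. The matrix itself is just a 3x3 linear solve, which the following NumPy-only sketch reproduces (so it runs without OpenCV installed):

import numpy as np

src = np.float64([[50, 50], [200, 50], [50, 200]])
dst = np.float64([[10, 100], [200, 50], [100, 250]])

# Rows [x, y, 1] of the source; solve A @ X = dst for X, then M = X.T (2x3).
A = np.hstack([src, np.ones((3, 1))])
M = np.linalg.solve(A, dst).T          # same matrix cv2.getAffineTransform returns

assert np.allclose(A @ M.T, dst)       # the three points land exactly on target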
from string import ascii_uppercase
lowerCAmelCase__ = {char: i for i, char in enumerate(ascii_uppercase)}
lowerCAmelCase__ = dict(enumerate(ascii_uppercase))
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : str ):
_A : Dict = len(UpperCamelCase__ )
_A : Union[str, Any] = 0
while True:
if x == i:
_A : str = 0
if len(UpperCamelCase__ ) == len(UpperCamelCase__ ):
break
key += key[i]
i += 1
return key
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : str ):
_A : Any = ""
_A : Union[str, Any] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
_A : str = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : str ):
_A : Union[str, Any] = ""
_A : int = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
_A : List[Any] = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def _UpperCAmelCase ():
_A : int = "THE GERMAN ATTACK"
_A : List[str] = "SECRET"
_A : Union[str, Any] = generate_key(UpperCamelCase__ , UpperCamelCase__ )
_A : Any = cipher_text(UpperCamelCase__ , UpperCamelCase__ )
print(f"Encrypted Text = {s}" )
print(f"Original Text = {original_text(UpperCamelCase__ , UpperCamelCase__ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 11 |
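Decoded from the renamed helpers above: the key generator repeats the key cyclically to the message length, encryption maps each letter to (plain - key) mod 26, and decryption inverts that with (cipher + key) mod 26, passing spaces through without consuming key characters. A standalone round-trip check of the scheme (uppercase ASCII only, as in the snippet; the unified sign parameter is my simplification):

from string import ascii_uppercase

C2I = {c: i for i, c in enumerate(ascii_uppercase)}
I2C = dict(enumerate(ascii_uppercase))

def extend_key(message: str, key: str) -> str:
    i = 0
    while len(key) < len(message):   # append the key to itself, cyclically
        key += key[i]
        i += 1
    return key

def shift(text: str, key: str, sign: int) -> str:
    out, i = '', 0
    for ch in text:
        if ch == ' ':
            out += ' '               # spaces pass through, key index frozen
        else:
            out += I2C[(C2I[ch] + sign * C2I[key[i]]) % 26]
            i += 1
    return out

msg = 'THE GERMAN ATTACK'
key = extend_key(msg, 'SECRET')
assert shift(shift(msg, key, -1), key, +1) == msg   # encrypt, then decrypt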
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 0 |
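The `__init__` above follows the Transformers lazy-import convention: public names are declared in `_import_structure`, and `_LazyModule` defers the heavy submodule imports until an attribute is first touched. A runnable toy sketch of the underlying mechanism (this shows the generic idea, not the exact `_LazyModule` code), pointed at stdlib modules so it works anywhere:

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve exported names to their defining module on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the module that actually defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = sorted(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)   # cache so __getattr__ fires only once per name
        return value

lazy = LazyModule('toy', {'json': ['dumps', 'loads'], 'math': ['sqrt']})
print(lazy.sqrt(9.0))   # 3.0 -- math is imported only at this point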
UpperCAmelCase_ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 12 |
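These names form the public feature-typing surface of `datasets`. A short usage sketch combining two of them:

from datasets import ClassLabel, Features, Value

features = Features({
    'text': Value('string'),
    'label': ClassLabel(names=['neg', 'pos']),
})
print(features['label'].str2int('pos'))   # 1
print(features['label'].int2str(0))       # 'neg'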
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1024 , lowerCAmelCase_=1024 , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> List[Any]:
_a : str = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
_a : List[Any] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='train' , **lowerCAmelCase_ )
_a : List[str] = tok.pad_token_id
def get_lens(lowerCAmelCase_ ):
_a : Dict = tqdm(
DataLoader(lowerCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=lowerCAmelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_a : Union[str, Any] = []
for batch in dl:
_a : Optional[Any] = batch['input_ids'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
_a : Optional[Any] = batch['labels'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
max_lens.append(max(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
max_lens.extend(lowerCAmelCase_ )
return max_lens
_a : str = get_lens(lowerCAmelCase_ )
_a : Optional[int] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='val' , **lowerCAmelCase_ )
_a : Dict = get_lens(lowerCAmelCase_ )
pickle_save(lowerCAmelCase_ , train_ds.len_file )
pickle_save(lowerCAmelCase_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 89 | 0 |
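The length statistics above come from counting non-padding positions per row. The core one-liner in isolation (PyTorch; the toy ids are mine):

import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 9, 2, 0, 0],
                          [7, 0, 0, 0, 0]])
lengths = input_ids.ne(pad_token_id).sum(1).tolist()
print(lengths)   # [3, 1] -- tokens per example, padding excluded

Caching these per-example lengths with `pickle_save` lets length-aware training samplers group examples without re-tokenizing the dataset.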
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCAmelCase : int = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
lowerCAmelCase : List[str] = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def A_ ( ):
SCREAMING_SNAKE_CASE_: List[Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE_: Optional[Any] = bs[:]
SCREAMING_SNAKE_CASE_: int = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCAmelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_: Any = [chr(_UpperCAmelCase ) for n in cs]
return dict(zip(_UpperCAmelCase , _UpperCAmelCase ) )
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[int] = set()
SCREAMING_SNAKE_CASE_: int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_: Tuple = char
return pairs
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
_UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : str = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int]="replace" , lowerCAmelCase__ : List[Any]="<s>" , lowerCAmelCase__ : List[Any]="</s>" , lowerCAmelCase__ : str="</s>" , lowerCAmelCase__ : Dict="<s>" , lowerCAmelCase__ : int="<unk>" , lowerCAmelCase__ : Union[str, Any]="<pad>" , lowerCAmelCase__ : str="<mask>" , lowerCAmelCase__ : Optional[int]=False , **lowerCAmelCase__ : Optional[int] , ):
SCREAMING_SNAKE_CASE_: Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else bos_token
SCREAMING_SNAKE_CASE_: str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else eos_token
SCREAMING_SNAKE_CASE_: Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else sep_token
SCREAMING_SNAKE_CASE_: Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else cls_token
SCREAMING_SNAKE_CASE_: Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else unk_token
SCREAMING_SNAKE_CASE_: str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
SCREAMING_SNAKE_CASE_: Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="utf-8") as vocab_handle:
SCREAMING_SNAKE_CASE_: str = json.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_: Optional[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_: List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="utf-8") as merges_handle:
SCREAMING_SNAKE_CASE_: Any = merges_handle.read().split("\n")[1:-1]
SCREAMING_SNAKE_CASE_: List[Any] = [tuple(merge.split()) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_: List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_: List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
def _SCREAMING_SNAKE_CASE ( self : str):
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : int):
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Dict):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_: str = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = get_pairs(lowerCAmelCase__)
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_: str = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__ , float("inf")))
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = bigram
SCREAMING_SNAKE_CASE_: Union[str, Any] = []
SCREAMING_SNAKE_CASE_: Any = 0
while i < len(lowerCAmelCase__):
try:
SCREAMING_SNAKE_CASE_: Optional[Any] = word.index(lowerCAmelCase__ , lowerCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
SCREAMING_SNAKE_CASE_: Union[str, Any] = j
if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
SCREAMING_SNAKE_CASE_: Tuple = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = new_word
if len(lowerCAmelCase__) == 1:
break
else:
SCREAMING_SNAKE_CASE_: int = get_pairs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = " ".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = word
return word
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : List[Any]):
SCREAMING_SNAKE_CASE_: List[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Optional[Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__).split(" "))
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : List[Any]):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Dict):
return self.decoder.get(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: Optional[Any] = "".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8" , errors=self.errors)
return text
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
SCREAMING_SNAKE_CASE_: List[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__) + "\n")
SCREAMING_SNAKE_CASE_: Union[str, Any] = 0
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
SCREAMING_SNAKE_CASE_: str = token_index
writer.write(" ".join(lowerCAmelCase__) + "\n")
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_: List[Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_: Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1]
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any]=False , **lowerCAmelCase__ : Optional[Any]):
SCREAMING_SNAKE_CASE_: List[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_: Optional[int] = " " + text
return (text, kwargs)
| 13 |
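The merge loop above (`bpe` upstream, renamed here) implements classic byte-pair encoding: repeatedly pick the adjacent symbol pair with the lowest merge rank and fuse it, until no ranked pair remains. A compact standalone sketch with an illustrative two-entry merge table; the merging pass is simplified relative to the original but produces the same result:

def get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

def bpe(token, ranks):
    word = tuple(token)
    while len(word) > 1:
        pairs = get_pairs(word)
        bigram = min(pairs, key=lambda p: ranks.get(p, float('inf')))
        if bigram not in ranks:          # no mergeable pair left
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return ' '.join(word)

ranks = {('l', 'o'): 0, ('lo', 'w'): 1}
print(bpe('lower', ranks))   # 'low e r'

The byte-to-unicode table built alongside it exists so every byte maps to a printable character, letting the merge table treat arbitrary UTF-8 input as plain strings.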
'''simple docstring'''
from typing import Any
class __magic_name__ :
def __init__( self : List[Any] ,_UpperCAmelCase : Any ):
_a : List[Any] = data
_a : Union[str, Any] = None
def __repr__( self : Any ):
return F"""Node({self.data})"""
class __magic_name__ :
def __init__( self : int ):
_a : Tuple = None
def __iter__( self : str ):
_a : int = self.head
while node:
yield node.data
_a : Union[str, Any] = node.next
def __len__( self : Optional[Any] ):
return sum(1 for _ in self )
def __repr__( self : str ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __getitem__( self : Tuple ,_UpperCAmelCase : int ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Union[str, Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
_a : Any = self.head
for _ in range(_UpperCAmelCase ):
_a : Optional[Any] = current.next
_a : Optional[int] = data
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Any ):
self.insert_nth(len(self ) ,_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : Any ):
self.insert_nth(0 ,_UpperCAmelCase )
def __lowercase ( self : str ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
_a : int = Node(_UpperCAmelCase )
if self.head is None:
_a : str = new_node
elif index == 0:
_a : List[str] = self.head # link new_node to head
_a : Union[str, Any] = new_node
else:
_a : int = self.head
for _ in range(index - 1 ):
_a : Union[str, Any] = temp.next
_a : List[str] = temp.next
_a : Optional[int] = new_node
def __lowercase ( self : Optional[int] ): # print every node data
print(self )
def __lowercase ( self : str ):
return self.delete_nth(0 )
def __lowercase ( self : str ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def __lowercase ( self : List[str] ,_UpperCAmelCase : int = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
_a : Optional[Any] = self.head # default first node
if index == 0:
_a : int = self.head.next
else:
_a : int = self.head
for _ in range(index - 1 ):
_a : str = temp.next
_a : str = temp.next
_a : int = temp.next.next
return delete_node.data
def __lowercase ( self : List[Any] ):
return self.head is None
def __lowercase ( self : Tuple ):
_a : List[Any] = None
_a : Tuple = self.head
while current:
# Store the current node's next node.
_a : Dict = current.next
# Make the current node's next point backwards
_a : str = prev
# Make the previous node be the current node
_a : Tuple = current
# Make the current node the next node (to progress iteration)
_a : Optional[Any] = next_node
# Return prev in order to put the head at the end
_a : int = prev
def __lowerCamelCase ( ) -> None:
_a : List[str] = LinkedList()
assert linked_list.is_empty() is True
assert str(lowerCAmelCase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(lowerCAmelCase_ ) == i
linked_list.insert_nth(lowerCAmelCase_ , i + 1 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(lowerCAmelCase_ ) == 9
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
_a : Union[str, Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(-8 , 1 ) )
def __lowerCamelCase ( ) -> None:
_a : Dict = [
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
-192.55_555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
_a : List[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(lowerCAmelCase_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(lowerCAmelCase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
_a : List[str] = linked_list.delete_head()
assert result == -9
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
_a : Dict = linked_list.delete_tail()
assert result == 12.2
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
_a : Optional[Any] = linked_list.delete_nth(10 )
assert result is None
assert (
str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(lowerCAmelCase_ )
assert (
str(lowerCAmelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(lowerCAmelCase_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __lowerCamelCase ( ) -> Union[str, Any]:
from doctest import testmod
testmod()
_a : Optional[int] = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(lowerCAmelCase_ )
print('\nReading/changing Node data using indexing:' )
print(f"""Element at Position 1: {linked_list[1]}""" )
_a : Optional[Any] = input('Enter New Value: ' ).strip()
print('New list:' )
print(lowerCAmelCase_ )
print(f"""length of linked_list is : {len(lowerCAmelCase_ )}""" )
if __name__ == "__main__":
main()
| 89 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCAmelCase__ = Features({'''text''': Value('''string''' )} )
UpperCAmelCase__ = Features({'''labels''': ClassLabel} )
UpperCAmelCase__ = "text"
UpperCAmelCase__ = "labels"
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : List[Any]) ->Dict:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""")
if not isinstance(features[self.label_column] , UpperCAmelCase__):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""")
A__ = copy.deepcopy(self)
A__ = self.label_schema.copy()
A__ = features[self.label_column]
A__ = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict[str, str]:
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
| 14 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : str ,_UpperCAmelCase : Path ,_UpperCAmelCase : Union[str, None] = None ,_UpperCAmelCase : Union[List[str], None] = None ,_UpperCAmelCase : Union[str, List[str], None] = None ,_UpperCAmelCase : bool = True ,):
_a : Dict = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )]
if identifier is not None:
_a : str = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
for n_ in n_identifier:
_a : int = [file for file in files if n_ not in file]
else:
_a : Optional[Any] = [file for file in files if n_identifier not in file]
_a : Dict = ignore_files or []
ignore_files.append('__init__.py' )
_a : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' ,_UpperCAmelCase )
if only_modules:
_a : Any = file.split('.' )[0]
try:
_a : Optional[int] = getattr(_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict = doctest.DocTestSuite(_UpperCAmelCase )
_a : Optional[int] = unittest.TextTestRunner().run(_UpperCAmelCase )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
_a : str = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def __lowercase ( self : Union[str, Any] ):
_a : Optional[Any] = Path('src/transformers' )
_a : Optional[Any] = 'modeling'
_a : Union[str, Any] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase ,ignore_files=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : str = Path('src/transformers' )
_a : List[str] = 'tokenization'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Any = Path('src/transformers' )
_a : str = 'configuration'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : Tuple = Path('src/transformers' )
_a : Optional[int] = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(_UpperCAmelCase ,n_identifier=_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
_a : Union[str, Any] = Path('docs/source' )
_a : List[str] = ['favicon.ico']
self.analyze_directory(_UpperCAmelCase ,ignore_files=_UpperCAmelCase ,only_modules=_UpperCAmelCase )
| 89 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
SCREAMING_SNAKE_CASE :Any = re.compile(R'\b(a|an|the)\b', re.UNICODE)
SCREAMING_SNAKE_CASE :List[str] = None
def UpperCAmelCase ( ) -> Any:
"""simple docstring"""
__A = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=a_ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=a_ , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
__A = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__A = bool(qa["answers"]["text"] )
return qid_to_has_ans
def UpperCAmelCase ( a_ ) -> str:
"""simple docstring"""
def remove_articles(a_ ):
return ARTICLES_REGEX.sub(" " , a_ )
def white_space_fix(a_ ):
return " ".join(text.split() )
def remove_punc(a_ ):
__A = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(a_ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(a_ ) ) ) )
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
if not s:
return []
return normalize_answer(a_ ).split()
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
return int(normalize_answer(a_ ) == normalize_answer(a_ ) )
def UpperCAmelCase ( a_ , a_ ) -> List[str]:
"""simple docstring"""
__A = get_tokens(a_ )
__A = get_tokens(a_ )
__A = collections.Counter(a_ ) & collections.Counter(a_ )
__A = sum(common.values() )
if len(a_ ) == 0 or len(a_ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__A = 1.0 * num_same / len(a_ )
__A = 1.0 * num_same / len(a_ )
__A = (2 * precision * recall) / (precision + recall)
return fa
def UpperCAmelCase ( a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
__A = {}
__A = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__A = qa["id"]
__A = [t for t in qa["answers"]["text"] if normalize_answer(a_ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__A = [""]
if qid not in preds:
print(F'''Missing prediction for {qid}''' )
continue
__A = preds[qid]
# Take max over all gold answers
__A = max(compute_exact(a_ , a_ ) for a in gold_answers )
__A = max(compute_fa(a_ , a_ ) for a in gold_answers )
return exact_scores, fa_scores
def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> Dict:
"""simple docstring"""
__A = {}
for qid, s in scores.items():
__A = na_probs[qid] > na_prob_thresh
if pred_na:
__A = float(not qid_to_has_ans[qid] )
else:
__A = s
return new_scores
def UpperCAmelCase ( a_ , a_ , a_=None ) -> int:
"""simple docstring"""
if not qid_list:
__A = len(a_ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
__A = len(a_ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def UpperCAmelCase ( a_ , a_ , a_ ) -> str:
"""simple docstring"""
for k in new_eval:
__A = new_eval[k]
def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> Union[str, Any]:
"""simple docstring"""
plt.step(a_ , a_ , color="b" , alpha=0.2 , where="post" )
plt.fill_between(a_ , a_ , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(a_ )
plt.savefig(a_ )
plt.clf()
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_=None , a_=None ) -> int:
"""simple docstring"""
__A = sorted(a_ , key=lambda a_ : na_probs[k] )
__A = 0.0
__A = 1.0
__A = 0.0
__A = [1.0]
__A = [0.0]
__A = 0.0
for i, qid in enumerate(a_ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__A = true_pos / float(i + 1 )
__A = true_pos / float(a_ )
if i == len(a_ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(a_ )
recalls.append(a_ )
if out_image:
plot_pr_curve(a_ , a_ , a_ , a_ )
return {"ap": 100.0 * avg_prec}
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_ ) -> List[str]:
"""simple docstring"""
if out_image_dir and not os.path.exists(a_ ):
os.makedirs(a_ )
__A = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__A = make_precision_recall_eval(
a_ , a_ , a_ , a_ , out_image=os.path.join(a_ , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
__A = make_precision_recall_eval(
a_ , a_ , a_ , a_ , out_image=os.path.join(a_ , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
__A = {k: float(a_ ) for k, v in qid_to_has_ans.items()}
__A = make_precision_recall_eval(
a_ , a_ , a_ , a_ , out_image=os.path.join(a_ , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(a_ , a_ , "pr_exact" )
merge_eval(a_ , a_ , "pr_f1" )
merge_eval(a_ , a_ , "pr_oracle" )
def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> List[str]:
"""simple docstring"""
if not qid_list:
return
__A = [na_probs[k] for k in qid_list]
__A = np.ones_like(a_ ) / float(len(a_ ) )
plt.hist(a_ , weights=a_ , bins=2_0 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(a_ , F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> Any:
"""simple docstring"""
__A = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__A = num_no_ans
__A = cur_score
__A = 0.0
__A = sorted(a_ , key=lambda a_ : na_probs[k] )
for i, qid in enumerate(a_ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__A = scores[qid]
else:
if preds[qid]:
__A = -1
else:
__A = 0
cur_score += diff
if cur_score > best_score:
__A = cur_score
__A = na_probs[qid]
return 100.0 * best_score / len(a_ ), best_thresh
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_ ) -> Tuple:
"""simple docstring"""
__A , __A = find_best_thresh(a_ , a_ , a_ , a_ )
__A , __A = find_best_thresh(a_ , a_ , a_ , a_ )
__A = best_exact
__A = exact_thresh
__A = best_fa
__A = fa_thresh
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
with open(OPTS.data_file ) as f:
__A = json.load(a_ )
__A = dataset_json["data"]
with open(OPTS.pred_file ) as f:
__A = json.load(a_ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__A = json.load(a_ )
else:
__A = {k: 0.0 for k in preds}
__A = make_qid_to_has_ans(a_ ) # maps qid to True/False
__A = [k for k, v in qid_to_has_ans.items() if v]
__A = [k for k, v in qid_to_has_ans.items() if not v]
__A , __A = get_raw_scores(a_ , a_ )
__A = apply_no_ans_threshold(a_ , a_ , a_ , OPTS.na_prob_thresh )
__A = apply_no_ans_threshold(a_ , a_ , a_ , OPTS.na_prob_thresh )
__A = make_eval_dict(a_ , a_ )
if has_ans_qids:
__A = make_eval_dict(a_ , a_ , qid_list=a_ )
merge_eval(a_ , a_ , "HasAns" )
if no_ans_qids:
__A = make_eval_dict(a_ , a_ , qid_list=a_ )
merge_eval(a_ , a_ , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(a_ , a_ , a_ , a_ , a_ , a_ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(a_ , a_ , a_ , a_ , a_ , OPTS.out_image_dir )
histogram_na_prob(a_ , a_ , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(a_ , a_ , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(a_ , a_ )
else:
print(json.dumps(a_ , indent=2 ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :str = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 15 |
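The heart of the scorer is token-level F1 between a normalised gold answer and the prediction (`compute_fa` above, i.e. `compute_f1` before this dump's digit renaming). Its arithmetic in isolation, skipping the article/punctuation normalisation:

import collections

def f1(gold: str, pred: str) -> float:
    gold_toks, pred_toks = gold.split(), pred.split()
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)

print(f1('the cat sat', 'the cat'))   # 0.8 (precision 1.0, recall 2/3)

For unanswerable questions both exact match and F1 collapse to a binary check that the prediction is also empty, which is why the script injects [''] as the gold answer list.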
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.' )
        return {}

    with open(resolved_config_file, encoding='utf-8' ) as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            'AutoImageProcessor is designed to be instantiated '
            'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get('image_processor_type', None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('auto_map', {}):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type', None)
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.' )
                image_processor_class = feature_extractor_class.replace('FeatureExtractor', 'ImageProcessor')
            if "AutoFeatureExtractor" in config_dict.get('auto_map', {}):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor', 'ImageProcessor')
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.' )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, 'image_processor_type', None)
            if hasattr(config, 'auto_map') and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}")
    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
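
# A minimal usage sketch for the class above (illustrative only; the checkpoint name
# "google/vit-base-patch16-224" and the `pil_image` variable are assumptions made for
# the example, not part of the module):
#
# from transformers import AutoImageProcessor
# image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# inputs = image_processor(images=pil_image, return_tensors="pt")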
| 89 | 0 |
"""simple docstring"""
def split(string: str, separator: str = " ") -> list:
    """
    Will split the string up into all the values separated by the separator

    >>> split("apple#banana#cherry#orange", separator='#')
    ['apple', 'banana', 'cherry', 'orange']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
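
# A behaviour note, checked by hand: unlike str.split, a trailing separator does not
# produce a trailing empty string here, e.g. split("hello world ") returns
# ['hello', 'world'] while "hello world ".split(" ") returns ['hello', 'world', ''].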
| 16 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Image', init=False, repr=False)
    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.')

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get('path') is not None and os.path.isfile(value['path']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path')}
        elif value.get('bytes') is not None or value.get('path') is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes'), "path": value.get('path')}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.')

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.')

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('::')[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['repo_id']
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, 'rb', use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('bytes') >= 0:
                bytes_array = storage.field('bytes')
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index('path') >= 0:
                path_array = storage.field('path')
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))['bytes'] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, 'rb') as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('path').to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, 'filename') and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1')
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.")
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize))
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}")

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
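
# A minimal usage sketch for the Image feature above (illustrative; "cat.png" is a
# placeholder path assumed only for the example):
#
# from datasets import Dataset, Image
# ds = Dataset.from_dict({"image": ["cat.png"]}).cast_column("image", Image())
# ds[0]["image"]  # lazily decoded into a PIL.Image.Image via decode_example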
| 89 | 0 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Returns the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """
    Returns the sum of all numbers which are equal to the sum of the
    factorials of their digits.
    """
    limit = 7 * factorial(9) + 1  # upper bound on candidates, see the note below
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F"{solution() = }")
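
# Why 7 * 9! + 1 is a safe upper bound (a hand-checked note, not part of the code
# path): an n-digit number is at least 10**(n - 1), while its digit-factorial sum is
# at most n * 9!. For n = 8 that maximum is 8 * 362880 = 2903040 < 10**7, so no
# number with 8 or more digits can equal its digit-factorial sum, and every
# candidate lies below 7 * 9! + 1 = 2540161.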
| 17 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ['$'] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = '*'
                    check1[j] = '*'
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # First pick every essential prime implicant: a minterm column covered by
    # exactly one row forces that row into the selection.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Then greedily cover the remaining minterms with the row covering the most.
    while True:
        max_n = 0
        rem = -1
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('_')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input('Enter the no. of variables\n'))
    minterms = [
        int(x)
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n').split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print('Prime Implicants are:')
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print('Essential Prime Implicants are:')
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
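
# A worked micro-example for the functions above (hand-checked, illustrative only):
# with 3 variables and integer minterms [0, 1], decimal_to_binary gives
# ['000', '001'], check() merges them into the single prime implicant '00_', and
# since that row alone covers both minterm columns of the chart, selection()
# returns it as the only essential prime implicant.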
| 89 | 0 |
import csv
import tweepy
# Twitter API credentials
consumer_key = ''
consumer_secret = ''
access_key = ''
access_secret = ''


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f'getting tweets before {oldest}')

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f'...{len(alltweets)} tweets downloaded so far')

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f'new_{screen_name}_tweets.csv', "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 18 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
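
# Note: the branch above is the standard transformers lazy-import pattern. Under
# TYPE_CHECKING the real symbols are imported so static analysis sees them, while at
# runtime the module object is swapped for a _LazyModule that imports each submodule
# only on first attribute access.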
| 89 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default='', metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'}, )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.", FutureWarning, )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
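
# A minimal usage sketch (illustrative; "./out" is a placeholder output directory):
#
# args = SageMakerTrainingArguments(output_dir="./out")
# args.device      # resolved by _setup_devices above
# args.world_size  # smp.dp_size() when SageMaker model parallelism is available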
| 19 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
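
# A note on the expected ids in test_full_tokenizer: they follow directly from the
# toy vocab defined in setUp, where 'un' sits at index 7, '##want' at 4, '##ed' at 5,
# ',' at 10, 'runn' at 8 and '##ing' at 9.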
| 89 | 0 |