"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE ( metaclass=a_ ):
"""simple docstring"""
lowercase__ = ["flax", "transformers"]
def __init__( self : int ,*lowercase_ : List[str] ,**lowercase_ : Union[str, Any] ):
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def __lowerCAmelCase ( cls : List[Any] ,*lowercase_ : int ,**lowercase_ : Union[str, Any] ):
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def __lowerCAmelCase ( cls : Dict ,*lowercase_ : Tuple ,**lowercase_ : str ):
requires_backends(cls ,['''flax''', '''transformers'''] )
class SCREAMING_SNAKE_CASE ( metaclass=a_ ):
"""simple docstring"""
lowercase__ = ["flax", "transformers"]
def __init__( self : Optional[int] ,*lowercase_ : str ,**lowercase_ : Optional[Any] ):
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def __lowerCAmelCase ( cls : List[str] ,*lowercase_ : List[str] ,**lowercase_ : Optional[Any] ):
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def __lowerCAmelCase ( cls : List[Any] ,*lowercase_ : Union[str, Any] ,**lowercase_ : List[str] ):
requires_backends(cls ,['''flax''', '''transformers'''] )
class SCREAMING_SNAKE_CASE ( metaclass=a_ ):
"""simple docstring"""
lowercase__ = ["flax", "transformers"]
def __init__( self : int ,*lowercase_ : List[Any] ,**lowercase_ : str ):
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def __lowerCAmelCase ( cls : List[str] ,*lowercase_ : Union[str, Any] ,**lowercase_ : Any ):
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def __lowerCAmelCase ( cls : int ,*lowercase_ : Optional[Any] ,**lowercase_ : Union[str, Any] ):
requires_backends(cls ,['''flax''', '''transformers'''] )
class SCREAMING_SNAKE_CASE ( metaclass=a_ ):
"""simple docstring"""
lowercase__ = ["flax", "transformers"]
def __init__( self : Union[str, Any] ,*lowercase_ : List[str] ,**lowercase_ : Optional[Any] ):
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def __lowerCAmelCase ( cls : int ,*lowercase_ : Dict ,**lowercase_ : Optional[Any] ):
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] ,*lowercase_ : str ,**lowercase_ : Optional[int] ):
requires_backends(cls ,['''flax''', '''transformers'''] )
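# Illustrative sketch (not part of the generated module): with `flax` or
# `transformers` missing, a dummy class raises an informative ImportError at
# instantiation time instead of breaking `import diffusers` itself, e.g.:
#
#     try:
#         FlaxStableDiffusionPipeline()
#     except ImportError as err:
#         print(err)  # names the missing "flax" and "transformers" backends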
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
A__ = len(lowercase_ )
while cur > 1:
# Find the maximum number in arr
A__ = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
A__ = arr[mi::-1] + arr[mi + 1 : len(lowercase_ )]
# Reverse whole list
A__ = arr[cur - 1 :: -1] + arr[cur : len(lowercase_ )]
cur -= 1
return arr
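
# Worked example (illustrative): pancake_sort([3, 1, 2])
#   cur=3: max of [3, 1, 2] is at index 0 -> flip [3] (no-op) -> flip first 3 -> [2, 1, 3]
#   cur=2: max of [2, 1] is at index 0 -> flip [2] (no-op) -> flip first 2 -> [1, 2, 3]
#   >>> pancake_sort([3, 1, 2])
#   [1, 2, 3]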
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = input("Enter numbers separated by a comma:\n").strip()
SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
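
# The fast tests above fingerprint a 3x3 corner of the generated image against
# hard-coded reference slices; the loose 1e-1 tolerance absorbs ONNX Runtime
# nondeterminism. An illustrative way to run only the fast tests in this module
# (invocation is an assumption, adjust to your checkout):
#
#     python -m pytest -k "OnnxStableDiffusionImg2ImgPipelineFastTests"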
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the next test can be timed without the download, which varies between machines."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args

        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # else the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Compute the expected (height, width) after BridgeTowerImageProcessor resizing, assuming do_resize=True with
        a scalar "shortest_edge" size and a size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
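
    # Worked example of the resize arithmetic above (illustrative): for a 640x480
    # PIL image with size {"shortest_edge": 288} and size_divisor 32:
    #   scale = 288 / min(640, 480) = 0.6 -> (newh, neww) = (288, 384)
    #   max_size = int(1333 / 800 * 288) = 479, and max(288, 384) <= 479, so no rescale
    #   288 and 384 are already multiples of 32 -> expected (height, width) = (288, 384)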
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
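
# Minimal usage sketch (assumes a local "data.txt"; this mirrors what
# `datasets.load_dataset("text", data_files=...)` does under the hood):
#
#     reader = TextDatasetReader("data.txt", split=NamedSplit("train"))
#     dataset = reader.read()  # one "text" column, one row per input line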
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
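
# Example mapping (illustrative trace through the branches above):
#   rename_key("patch_embed.0.weight", num_meta4D_last_stage)
#   -> "patch_embed.convolution1.weight"                   (patch_embed branch)
#   -> "efficientformer.patch_embed.convolution1.weight"   (final prefixing)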
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
"""simple docstring"""
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
while b:
lowerCamelCase , lowerCamelCase : Tuple = b, a % b
return a
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(a_, a % b )
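
# Worked trace (illustrative): euclidean_gcd(3, 5)
#   (a, b): (3, 5) -> (5, 3) -> (3, 2) -> (2, 1) -> (1, 0) -> returns 1
# The recursive variant unwinds the same sequence of remainders.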
def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")

    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_A : List[Any] = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_A : int = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_A : List[str] = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_A : List[Any] = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_A : Optional[int] = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def camel_case_split(identifier: str) -> list:
    """Split a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
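
# Example (illustrative): camel_case_split("TFBertForMaskedLM")
# -> ["TF", "Bert", "For", "Masked", "LM"]
# The regex breaks before each lower-to-upper transition and before an
# uppercase run that is followed by a lowercase letter.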
def get_frameworks_table():
    """Build a dataframe flagging PT/TF/Flax support and the preferred processor for each model type."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
_A : Any = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
_A : List[str] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = "large" in model_name or "huge" in model_name
    use_post_layernorm = "large" in model_name or "huge" in model_name
    use_layerscale = "large" in model_name or "huge" in model_name

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
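
# Example mapping (illustrative trace through the branches above):
#   rename_key("patch_embed.proj.weight")
#   -> "embeddings.patch_embeddings.projection.weight"
#   -> "focalnet.embeddings.patch_embeddings.projection.weight"  (fallback prefix)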
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
'7B': 1_1008,
'13B': 1_3824,
'30B': 1_7920,
'65B': 2_2016,
'70B': 2_8672,
}
NUM_SHARDS = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256) -> int:
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
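# Hedged sanity check (illustrative, using the LLaMA-7B defaults dim = 4096,
# ffn_dim_multiplier = 1, multiple_of = 256): int(8 * 4096 / 3) = 10922, rounded
# up to the next multiple of 256 gives 11008 — the 7B entry in the
# intermediate-size table at the top of this file.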
def read_json(path) -> dict:
"""simple docstring"""
with open(path, "r") as f:
return json.load(f)
def write_json(text, path) -> None:
"""simple docstring"""
with open(path, "w") as f:
json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
"""simple docstring"""
os.makedirs(model_path, exist_ok=True)
tmp_model_path = os.path.join(model_path, "tmp")
os.makedirs(tmp_model_path, exist_ok=True)
params = read_json(os.path.join(input_base_path, "params.json"))
num_shards = NUM_SHARDS[model_size]
n_layers = params["n_layers"]
n_heads = params["n_heads"]
n_heads_per_shard = n_heads // num_shards
dim = params["dim"]
dims_per_head = dim // n_heads
base = 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
if "n_kv_heads" in params:
num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
key_value_dim = dim // num_key_value_heads
else:  # compatibility with other checkpoints
num_key_value_heads = n_heads
num_local_key_value_heads = n_heads_per_shard
key_value_dim = dim
# permute for sliced rotary
def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
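# This reorders the rotary query/key weights from the original interleaved layout
# to the split-half layout that the Hugging Face Llama rotary embedding expects.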
print(f"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
A__ : List[str] =torch.load(os.path.join(__snake_case, """consolidated.00.pth""" ), map_location="""cpu""" )
else:
# Sharded
A__ : Optional[Any] =[
torch.load(os.path.join(__snake_case, f"consolidated.{i:02d}.pth" ), map_location="""cpu""" )
for i in range(__snake_case )
]
A__ : Optional[Any] =0
A__ : str ={"""weight_map""": {}}
for layer_i in range(__snake_case ):
A__ : Dict =f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
A__ : Dict ={
f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wq.weight"] ),
f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wk.weight"] ),
f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
A__ : Any ={
f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
f"layers.{layer_i}.attention_norm.weight"
].clone(),
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
f"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
A__ : Optional[Any] =permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(__snake_case, __snake_case, __snake_case )
for i in range(__snake_case )
], dim=0, ).reshape(__snake_case, __snake_case ) )
A__ : int =permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
__snake_case, __snake_case, __snake_case )
for i in range(__snake_case )
], dim=0, ).reshape(__snake_case, __snake_case ), __snake_case, __snake_case, __snake_case, )
A__ : int =torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
__snake_case, __snake_case, __snake_case )
for i in range(__snake_case )
], dim=0, ).reshape(__snake_case, __snake_case )
A__ : List[str] =torch.cat(
[loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(__snake_case )], dim=1 )
A__ : Optional[int] =torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(__snake_case )], dim=0 )
A__ : str =torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(__snake_case )], dim=1 )
A__ : List[str] =torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(__snake_case )], dim=0 )
A__ : List[Any] =inv_freq
for k, v in state_dict.items():
A__ : Optional[Any] =filename
param_count += v.numel()
torch.save(__snake_case, os.path.join(__snake_case, __snake_case ) )
A__ : Tuple =f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
A__ : Tuple ={
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
A__ : Any ={
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(__snake_case )], dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(__snake_case )], dim=0 ),
}
for k, v in state_dict.items():
A__ : int =filename
param_count += v.numel()
torch.save(__snake_case, os.path.join(__snake_case, __snake_case ) )
# Write configs
A__ : Union[str, Any] ={"""total_size""": param_count * 2}
write_json(__snake_case, os.path.join(__snake_case, """pytorch_model.bin.index.json""" ) )
A__ : Optional[Any] =params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
A__ : List[Any] =params["""multiple_of"""] if """multiple_of""" in params else 256
A__ : int =LlamaConfig(
hidden_size=__snake_case, intermediate_size=compute_intermediate_size(__snake_case, __snake_case, __snake_case ), num_attention_heads=params["""n_heads"""], num_hidden_layers=params["""n_layers"""], rms_norm_eps=params["""norm_eps"""], num_key_value_heads=__snake_case, )
config.save_pretrained(__snake_case )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
A__ : List[Any] =LlamaForCausalLM.from_pretrained(__snake_case, torch_dtype=torch.floataa, low_cpu_mem_usage=__snake_case )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(__snake_case, safe_serialization=__snake_case )
shutil.rmtree(__snake_case )
def write_tokenizer(tokenizer_path, input_tokenizer_path) -> None:
"""simple docstring"""
tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
tokenizer = tokenizer_class(input_tokenizer_path)
tokenizer.save_pretrained(tokenizer_path)
def main() -> None:
"""simple docstring"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders", )
parser.add_argument(
"--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"], )
parser.add_argument(
"--output_dir", help="Location to write HF model and tokenizer", )
parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
args = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
spm_path = os.path.join(args.input_dir, "tokenizer.model")
write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 136 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
order = 1
@register_to_config
def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
"""simple docstring"""
self.sigmas = None
self.discrete_sigmas = None
self.timesteps = None
def set_timesteps(self, num_inference_steps, device=None):
"""simple docstring"""
self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)
def step_pred(self, score, x, t, generator=None):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
log_mean_coeff = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
std = std.flatten()
while len(std.shape) < len(score.shape):
std = std.unsqueeze(-1)
score = -score / std
# compute
dt = -1.0 / len(self.timesteps)
beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
beta_t = beta_t.flatten()
while len(beta_t.shape) < len(x.shape):
beta_t = beta_t.unsqueeze(-1)
drift = -0.5 * beta_t * x
diffusion = torch.sqrt(beta_t)
drift = drift - diffusion**2 * score
x_mean = x + drift * dt
# add noise
noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
x = x_mean + diffusion * math.sqrt(-dt) * noise
return x, x_mean
def __len__( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return self.config.num_train_timesteps
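# Hedged usage sketch (names follow the reconstructed API above; the `score`
# tensor would come from a score-prediction model, which is assumed here):
# scheduler = ScoreSdeVpScheduler()
# scheduler.set_timesteps(num_inference_steps=1000)
# for t in scheduler.timesteps:
#     x, x_mean = scheduler.step_pred(score, x, t, generator=generator)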
| 242 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
scheduler_classes = (DPMSolverSDEScheduler,)
num_inference_steps = 10
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_full_loop_no_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def test_full_loop_with_v_prediction(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def test_full_loop_device(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def test_full_loop_device_karras_sigmas(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
| 26 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
"""simple docstring"""
model_type = "gpt_neo"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs, ):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_heads = num_heads
self.intermediate_size = intermediate_size
self.window_size = window_size
self.activation_function = activation_function
self.resid_dropout = resid_dropout
self.embed_dropout = embed_dropout
self.attention_dropout = attention_dropout
self.classifier_dropout = classifier_dropout
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.attention_types = attention_types
self.attention_layers = self.expand_attention_types_params(attention_types)
if len(self.attention_layers) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
f"`config.num_layers = {self.num_layers}`. "
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument.")
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@staticmethod
def expand_attention_types_params(attention_types):
attentions = []
for item in attention_types:
for _ in range(item[1]):
attentions.extend(item[0])
return attentions
def custom_unfold(input, dimension, size, step):
import torch
shape = input.size()
rank = len(shape)
sizedim = shape[dimension]
low_indices = torch.arange(0, sizedim, step)
min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
indices = torch.arange(size) + low_indices[:min_length][:, None]
s = [slice(None)] * rank
s[dimension] = indices
sliced = input[s]
perm = list(range(0, rank + 1))
perm.append(perm.pop(dimension + 1))
return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
import torch
candidates = torch.arange(1, window_size)
remainders = torch.remainder(seq_length, candidates)
divisor_indices = remainders == 0
divisors = candidates[divisor_indices]
largest_divisor = torch.max(divisors)
return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
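# Hedged sanity check (illustrative values): for seq_length = 10 and
# window_size = 4, the candidate divisors are 1..3, the largest that divides 10
# evenly is 2, so the helper returns (2, 5) — block length 2 and 5 blocks.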
class GPTNeoOnnxConfig(OnnxConfigWithPast):
"""simple docstring"""
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
if self.use_past:
self.fill_with_past_key_values_(common_inputs, direction="inputs")
common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
else:
common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def num_attention_heads(self) -> int:
return self._config.num_heads
def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ) -> Mapping[str, Any]:
common_inputs = super(GPTNeoOnnxConfig, self).generate_dummy_inputs(
tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
# We need to order the input in the way they appears in the forward()
ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
batch, seqlen = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
past_key_values_length = seqlen + 2
past_shape = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
ordered_inputs["past_key_values"] = [
(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
]
ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
if self.use_past:
mask_dtype = ordered_inputs["attention_mask"].dtype
ordered_inputs["attention_mask"] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
return ordered_inputs
@property
def default_onnx_opset(self) -> int:
return 13
| 371 |
def triangle_number_generator():
for n in range(1, 1_000_000):
yield n * (n + 1) // 2
def count_divisors(n):
divisors_count = 1
i = 2
while i * i <= n:
multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
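# Hedged sanity check: 28 = 2**2 * 7, so its divisor count is (2 + 1) * (1 + 1) == 6
# (divisors 1, 2, 4, 7, 14, 28), matching the classic prime-factorization formula.
assert count_divisors(28) == 6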
def solution():
return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 81 | 0 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(ratio: float = 0.1) -> int:
'''simple docstring'''
j = 3
primes = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
primes += is_prime(i)
j += 2
return j
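# Hedged note: this mirrors Project Euler problem 58 — walking a number spiral's
# diagonals (side length j grows by 2 each ring, giving 2*j - 1 diagonal values)
# until the fraction of primes on the diagonals drops below `ratio`.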
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.int8,
"tensor(uint8)": np.uint8,
"tensor(int16)": np.int16,
"tensor(uint16)": np.uint16,
"tensor(int32)": np.int32,
"tensor(uint32)": np.uint32,
"tensor(int64)": np.int64,
"tensor(uint64)": np.uint64,
"tensor(float16)": np.float16,
"tensor(float)": np.float32,
"tensor(double)": np.float64,
}
class OnnxRuntimeModel:
'''simple docstring'''
def __init__(self, model=None, **kwargs):
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
self.model = model
self.model_save_dir = kwargs.get("model_save_dir", None)
self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)
def __call__(self, **kwargs):
inputs = {k: np.array(v) for k, v in kwargs.items()}
return self.model.run(None, inputs)
@staticmethod
def load_model(path: Union[str, Path], provider=None, sess_options=None):
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
provider = "CPUExecutionProvider"
return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
src_path = self.model_save_dir.joinpath(self.latest_model_name)
dst_path = Path(save_directory).joinpath(model_file_name)
try:
shutil.copyfile(src_path, dst_path)
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
if src_path.exists():
dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
try:
shutil.copyfile(src_path, dst_path)
except shutil.SameFileError:
pass
def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs, ):
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
os.makedirs(save_directory, exist_ok=True)
# saving model weights/files
self._save_pretrained(save_directory, **kwargs)
@classmethod
def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str, None]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs, ):
model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(model_id):
model = OnnxRuntimeModel.load_model(
os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
kwargs["model_save_dir"] = Path(model_id)
# load model from hub
else:
# download model
model_cache_path = hf_hub_download(
repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download, )
kwargs["model_save_dir"] = Path(model_cache_path).parent
kwargs["latest_model_name"] = Path(model_cache_path).name
model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
return cls(model=model, **kwargs)
@classmethod
def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs, ):
revision = None
if len(str(model_id).split("@")) == 2:
model_id, revision = model_id.split("@")
return cls._from_pretrained(
model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
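# Hedged usage sketch (the repo id and input name below are illustrative only):
# model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", provider="CPUExecutionProvider")
# outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))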
| 258 | 0 |
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
nth_term = int(nth_term)
power = int(power)
series: list[str] = []
for temp in range(int(nth_term)):
series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
return series
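# Hedged worked example: p_series(5, 2) yields the first five terms of the
# p = 2 series, i.e. ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"].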
if __name__ == "__main__":
import doctest
doctest.testmod()
nth_term = int(input('Enter the last number (nth term) of the P-Series'))
power = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
| 350 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
model_type = "gptj"
attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs, ):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.rotary_dim = rotary_dim
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(
bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
if not getattr(self._config, "pad_token_id", None):
# TODO: how to do that better?
self._config.pad_token_id = 0
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
if self.use_past:
self.fill_with_past_key_values_(common_inputs, direction="inputs")
common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
else:
common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def num_layers(self) -> int:
return self._config.n_layer
@property
def num_attention_heads(self) -> int:
return self._config.n_head
def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
common_inputs = super(GPTJOnnxConfig, self).generate_dummy_inputs(
tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
# We need to order the input in the way they appears in the forward()
ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
batch, seqlen = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
past_key_values_length = seqlen + 2
past_shape = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
ordered_inputs["past_key_values"] = [
(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
]
ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
if self.use_past:
mask_dtype = ordered_inputs["attention_mask"].dtype
ordered_inputs["attention_mask"] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
return ordered_inputs
@property
def default_onnx_opset(self) -> int:
return 13
| 28 | 0 |
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
len_string = len(input_string) + 1
len_pattern = len(pattern) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
# since string of zero length match pattern of zero length
dp[0][0] = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1, len_string):
dp[i][0] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1, len_pattern):
dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1, len_string):
for j in range(1, len_pattern):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
dp[i][j] = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
dp[i][j] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
dp[i][j] = dp[i - 1][j]
else:
dp[i][j] = 0
else:
dp[i][j] = 0
return bool(dp[-1][-1])
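# Hedged sanity check: "c*a*b" matches "aab" because "c*" can match the empty
# string and "a*" can match "aa", so match_pattern("aab", "c*a*b") is True.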
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__A = "aab"
__A = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
| 217 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
base_str = str(n)
return len(base_str) == 9 and set(base_str) == set("123456789")
def solution() -> int | None:
for base_num in range(9999, 4999, -1):
candidate = 100002 * base_num
if is_9_pandigital(candidate):
return candidate
for base_num in range(333, 99, -1):
candidate = 1002003 * base_num
if is_9_pandigital(candidate):
return candidate
return None
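# Hedged note (this is Project Euler problem 38): for a 4-digit x in the scanned
# range, 100002 * x equals the concatenation of x and 2x, and for a 3-digit x,
# 1002003 * x equals the concatenation of x, 2x and 3x — so scanning base numbers
# downward finds the largest 1-to-9 pandigital concatenated product.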
if __name__ == "__main__":
print(F'''{solution() = }''')
| 217 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class A :
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def __lowerCAmelCase ( self ) -> "DownloadConfig":
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(SCREAMING_SNAKE_CASE ) for k, v in self.__dict__.items()} )
| 311 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
'''simple docstring'''
for i in range(0, n):
for _ in range(0, n - i - 1):  # printing spaces
print(' ', end='')
for _ in range(0, i + 1):  # printing stars
print('* ', end='')
print()
def reverse_floyd(n):
'''simple docstring'''
for i in range(n, 0, -1):
for _ in range(i, 0, -1):  # printing stars
print('* ', end='')
print()
for _ in range(n - i + 1, 0, -1):  # printing spaces
print(' ', end='')
def pretty_print(n):
'''simple docstring'''
if n <= 0:
print(' ... .... nothing printing :(')
return
floyd(n)  # upper half
reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
K = 1
while K:
user_number = int(input('enter the number and , and see the magic : '))
print()
pretty_print(user_number)
K = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
| 311 | 1 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
'''simple docstring'''
exp_x = torch.exp(x)
A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
return torch.log(A) - B / A
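# Hedged derivation of the closed form above: with p_i = softmax(x)_i, the entropy
# H = -sum_i p_i * log(p_i) expands to log(sum_i exp(x_i)) - (sum_i x_i * exp(x_i)) / (sum_i exp(x_i)),
# i.e. log(A) - B / A, which is exactly what this function returns.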
class DeeBertEncoder(nn.Module):
"""simple docstring"""
def __init__(self, config):
super().__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
def set_early_exit_entropy(self, x):
if (type(x) is float) or (type(x) is int):
for i in range(len(self.early_exit_entropy)):
self.early_exit_entropy[i] = x
else:
self.early_exit_entropy = x
def init_highway_pooler(self, pooler):
loaded_model = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name])
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ):
all_hidden_states = ()
all_attentions = ()
all_highway_exits = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask)
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
current_outputs = (hidden_states,)
if self.output_hidden_states:
current_outputs = current_outputs + (all_hidden_states,)
if self.output_attentions:
current_outputs = current_outputs + (all_attentions,)
highway_exit = self.highway[i](current_outputs)
# logits, pooled_output
if not self.training:
highway_logits = highway_exit[0]
highway_entropy = entropy(highway_logits)
highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
all_highway_exits = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(new_output, i + 1)
else:
all_highway_exits = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
outputs = outputs + (all_highway_exits,)
return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). ", BERT_START_DOCSTRING, )
class DeeBertModel(BertPreTrainedModel):
"""simple docstring"""
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = DeeBertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def init_highway_pooler(self):
self.encoder.init_highway_pooler(self.pooler)
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
encoder_extended_attention_mask = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype)  # fp16 compatibility
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
encoder_outputs = self.encoder(
embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
]  # add hidden_states and attentions if they are here
return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
"""simple docstring"""
def __init__(self, message, exit_layer):
self.message = message
self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
"""simple docstring"""
def __init__(self, config):
super().__init__()
self.pooler = BertPooler(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, encoder_outputs):
# Pooler
pooler_input = encoder_outputs[0]
pooler_output = self.pooler(pooler_input)
# "return" pooler_output
# BertModel
bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
pooled_output = bmodel_output[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ", BERT_START_DOCSTRING, )
class DeeBertForSequenceClassification(BertPreTrainedModel):
"""simple docstring"""
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.num_layers = config.num_hidden_layers
self.bert = DeeBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ):
exit_layer = self.num_layers
try:
outputs = self.bert(
input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
except HighwayException as e:
outputs = e.message
exit_layer = e.exit_layer
logits = outputs[0]
if not self.training:
original_entropy = entropy(logits)
highway_entropy = []
highway_logits_all = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
# work with highway exits
highway_losses = []
for highway_exit in outputs[-1]:
highway_logits = highway_exit[0]
if not self.training:
highway_logits_all.append(highway_logits)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
highway_losses.append(highway_loss)
if train_highway:
outputs = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
outputs = (loss,) + outputs
if not self.training:
outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
outputs = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
)  # use the highway of the last layer
return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 314 |
import numpy as np
from PIL import Image
def maxpooling(arr, size, stride):
'''simple docstring'''
arr = np.array(arr)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
i = 0
j = 0
mat_i = 0
mat_j = 0
# compute the shape of the output matrix
maxpool_shape = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
updated_arr = np.zeros((maxpool_shape, maxpool_shape))
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
j = 0
mat_j = 0
return updated_arr
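# Hedged worked example: with a 4x4 input, size=2 and stride=2,
# maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
# returns array([[6., 8.], [14., 16.]]) — the maximum of each non-overlapping 2x2 window.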
def avgpooling(arr, size, stride):
'''simple docstring'''
arr = np.array(arr)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
i = 0
j = 0
mat_i = 0
mat_j = 0
# compute the shape of the output matrix
avgpool_shape = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
updated_arr = np.zeros((avgpool_shape, avgpool_shape))
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
j = 0
mat_j = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
_SCREAMING_SNAKE_CASE : Optional[int] = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 314 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase: Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase: Union[str, Any] = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
"""simple docstring"""
model_type = "vit_msn"
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs, ):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
| 354 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
bwt_string: str
idx_original_string: int
def all_rotations(s: str) -> list[str]:
if not isinstance(s, str):
raise TypeError("The parameter s type must be str.")
return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
if not isinstance(s, str):
raise TypeError("The parameter s type must be str.")
if not s:
raise ValueError("The parameter s must not be empty.")
rotations = all_rotations(s)
rotations.sort()  # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
response: BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations]),
"idx_original_string": rotations.index(s),
}
return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
if not isinstance(bwt_string, str):
raise TypeError("The parameter bwt_string type must be str.")
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty.")
try:
idx_original_string = int(idx_original_string)
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or castable"
" to int.")
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0.")
if idx_original_string >= len(bwt_string):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string).")
ordered_rotations = [""] * len(bwt_string)
for _ in range(len(bwt_string)):
for i in range(len(bwt_string)):
ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
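# Hedged worked example: bwt_transform("banana") gives
# {"bwt_string": "nnbaaa", "idx_original_string": 3} (the sorted rotations are
# abanan, anaban, ananab, banana, nabana, nanaba), and reverse_bwt("nnbaaa", 3)
# recovers "banana".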
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """
UpperCAmelCase: int = input(entry_msg).strip()
UpperCAmelCase: List[str] = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
UpperCAmelCase: Union[str, Any] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
| 336 | 0 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
__version__ = '3.0.12'
_logger = None
def logger():
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None
    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None
    def __enter__(self):
        return self.lock
    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file
    @property
    def timeout(self):
        return self._timeout
    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None
    def _acquire(self):
        raise NotImplementedError()
    def _release(self):
        raise NotImplementedError()
    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}')
                        self._acquire()
                if self.is_locked:
                    logger().debug(f'Lock {lock_id} acquired on {lock_filename}')
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}')
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...')
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}')
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f'Lock {lock_id} released on {lock_filename}')
        return None
    def __enter__(self):
        self.acquire()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None
    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            filename = filename[: max_length - len(hashed_filename) - 8] + '...' + hashed_filename + '.lock'
            return os.path.join(dirname, filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = '\\\\?\\' + relative_to_absolute_path(self.lock_file)
    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None
    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
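# SoftFileLock relies on os.O_EXCL: os.open() fails atomically if the lock file
# already exists, so the mere existence of the file acts as the lock. Unlike the
# flock/msvcrt variants above, a crashed process can leave a stale lock file behind.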
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
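# Minimal usage sketch (file names are placeholders):
#     lock = FileLock("resource.txt.lock", timeout=10)
#     with lock:
#         ...  # critical section; acquire() raises Timeout after 10 seconds
# Acquisitions nest: the counter in BaseFileLock ensures only the outermost
# release actually unlocks the file.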
| 220 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    '''simple docstring'''
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        articles = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / 'scores.json')
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f'\n            run_eval_search.py\n            {model}\n            {input_file_name}\n            {output_file_name}\n            --score_path {score_path}\n            --task {task}\n            --num_beams 2\n            --length_penalty 2.0\n        '.split()
        with patch.object(sys, 'argv', testargs):
            run_generate()
            assert Path(score_path).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        text = {
            'en': ["Machine learning is great, isn't it?", 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / 'scores.json')
        reference_path = str(tmp_dir / 'val.target')
        _dump_articles(input_file_name, text['en'])
        _dump_articles(reference_path, text['de'])
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f'\n            run_eval_search.py\n            {model}\n            {str(input_file_name)}\n            {str(output_file_name)}\n            --score_path {score_path}\n            --reference_path {reference_path}\n            --task {task}\n        '.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'])
        with patch.object(sys, 'argv', testargs):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [' num_beams | length_penalty', model, 'Best score args']
        un_expected_strings = ['Info']
        if "translation" in task:
            expected_strings.append('bleu')
        else:
            expected_strings.extend(ROUGE_KEYS)
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(score_path).exists()
        os.remove(Path(score_path))
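# These tests are meant to be collected by pytest, e.g. (the path is an
# assumption about where this module lives in the repository):
#     pytest examples/legacy/seq2seq/test_seq2seq_examples.py -k run_eval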
| 220 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # out_channels is double in_channels because the model predicts both mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f'''image.shape {image.shape}''')
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 356 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    '''simple docstring'''
    config = MobileNetV1Config(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_mobilenet_v1_config(model_name)
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size}, size={"shortest_edge": config.image_size + 32}, )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
snake_case : str = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 41 | 0 |
"""simple docstring"""
import os
# Precomputes a list of the first 100 triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
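# A word's value is the sum of its letters' alphabetical positions
# (ord(letter) - 64 for uppercase), e.g. "SKY" = 19 + 11 + 25 = 55, which is
# the 10th triangular number, so "SKY" counts as a triangle word.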
def solution():
    """simple docstring"""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_directory, "words.txt")
    words = ""
    with open(wordfile_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
    print(solution())
| 320 |
import itertools
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
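    # e.g. 5 = 6*1 - 1, 7 = 6*1 + 1, 11 = 6*2 - 1, 13 = 6*2 + 1, so it suffices
    # to test the divisors i and i + 2 for i = 5, 11, 17, ...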
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator():
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth: int = 10_001) -> int:
    '''simple docstring'''
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f"{solution() = }")
| 30 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = '''perceiver'''
    def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''inputs''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ])
    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-4
    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, num_choices=-1, is_pair=False, framework=None, num_channels=3, image_width=40, image_height=40, ) -> Mapping[str, Any]:
        '''simple docstring'''
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [''' '''.join(['''a''']) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs['''inputs'''] = inputs.pop('''input_ids''')
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs['''inputs'''] = inputs.pop('''pixel_values''')
            return inputs
        else:
            raise ValueError(
                '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''')
| 279 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        '''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
    )
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
    '''7B''': 11008,
    '''13B''': 13824,
    '''30B''': 17920,
    '''65B''': 22016,
    '''70B''': 28672,
}
NUM_SHARDS = {
    '''7B''': 1,
    '''7Bf''': 1,
    '''13B''': 2,
    '''13Bf''': 2,
    '''30B''': 4,
    '''65B''': 8,
    '''70B''': 8,
    '''70Bf''': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """simple docstring"""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
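# Worked example: for the 7B model, n = 4096 gives int(8 * 4096 / 3) = 10922,
# which rounded up to the next multiple of 256 is 11008, matching
# INTERMEDIATE_SIZE_MAP["7B"] above.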
def read_json(path):
    """simple docstring"""
    with open(path, '''r''') as f:
        return json.load(f)
def write_json(text, path):
    """simple docstring"""
    with open(path, '''w''') as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    """simple docstring"""
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, '''tmp''')
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, '''params.json'''))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['''n_layers''']
    n_heads = params['''n_heads''']
    n_heads_per_shard = n_heads // num_shards
    dim = params['''dim''']
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params['''n_kv_heads''']  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
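    # The original checkpoints store rotary (RoPE) query/key rows with the
    # rotated dimension pairs interleaved; Hugging Face's LLaMA splits each head
    # dimension into two contiguous halves instead, and this view/transpose/
    # reshape converts between the two layouts.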
    print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''')
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, '''consolidated.00.pth'''), map_location='''cpu''')
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f'''consolidated.{i:02d}.pth'''), map_location='''cpu''')
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {'''weight_map''': {}}
    for layer_i in range(n_layers):
        filename = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
                    loaded[f'''layers.{layer_i}.attention.wq.weight''']),
                f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
                    loaded[f'''layers.{layer_i}.attention.wk.weight''']),
                f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
                f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
                f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
                f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
                f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
                f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
                f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
                    f'''layers.{layer_i}.attention_norm.weight'''
                ].clone(),
                f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
                    f'''layers.{layer_i}.ffn_norm.weight'''
                ].clone(),
            }
            state_dict[f'''model.layers.{layer_i}.self_attn.q_proj.weight'''] = permute(
                torch.cat(
                    [
                        loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(dim, dim))
            state_dict[f'''model.layers.{layer_i}.self_attn.k_proj.weight'''] = permute(
                torch.cat(
                    [
                        loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
                            num_local_key_value_heads, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, )
            state_dict[f'''model.layers.{layer_i}.self_attn.v_proj.weight'''] = torch.cat(
                [
                    loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
                        num_local_key_value_heads, dims_per_head, dim)
                    for i in range(num_shards)
                ], dim=0, ).reshape(key_value_dim, dim)
            state_dict[f'''model.layers.{layer_i}.self_attn.o_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(num_shards)], dim=1)
            state_dict[f'''model.layers.{layer_i}.mlp.gate_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(num_shards)], dim=0)
            state_dict[f'''model.layers.{layer_i}.mlp.down_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(num_shards)], dim=1)
            state_dict[f'''model.layers.{layer_i}.mlp.up_proj.weight'''] = torch.cat(
                [loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(num_shards)], dim=0)
        state_dict[f'''model.layers.{layer_i}.self_attn.rotary_emb.inv_freq'''] = inv_freq
        for k, v in state_dict.items():
            index_dict['''weight_map'''][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
    if model_size == "7B":
        # Unsharded
        state_dict = {
            '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
            '''model.norm.weight''': loaded['''norm.weight'''],
            '''lm_head.weight''': loaded['''output.weight'''],
        }
    else:
        state_dict = {
            '''model.norm.weight''': loaded[0]['''norm.weight'''],
            '''model.embed_tokens.weight''': torch.cat(
                [loaded[i]['''tok_embeddings.weight'''] for i in range(num_shards)], dim=1),
            '''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict['''weight_map'''][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))
    # Write configs
    index_dict['''metadata'''] = {'''total_size''': param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, '''pytorch_model.bin.index.json'''))
    ffn_dim_multiplier = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
    multiple_of = params['''multiple_of'''] if '''multiple_of''' in params else 256
    config = LlamaConfig(
        hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params['''n_heads'''], num_hidden_layers=params['''n_layers'''], rms_norm_eps=params['''norm_eps'''], num_key_value_heads=num_key_value_heads, )
    config.save_pretrained(tmp_model_path)
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print('''Loading the checkpoint in a Llama model.''')
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print('''Saving in the Transformers format.''')
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """simple docstring"""
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''')
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--input_dir''', help='''Location of LLaMA weights, which contains tokenizer.model and model folders''', )
    parser.add_argument(
        '''--model_size''', choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''], )
    parser.add_argument(
        '''--output_dir''', help='''Location to write HF model and tokenizer''', )
    parser.add_argument('''--safe_serialization''', type=bool, help='''Whether or not to save using `safetensors`.''')
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, '''tokenizer.model''')
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 279 | 1 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'Bearer {token}'}
    url = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']})
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F'&page={i + 2}', headers=headers).json()
            job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']})
        return job_links
    except Exception:
        print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}')
    return {}
def get_artifacts_links(workflow_run_id, token=None):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'Bearer {token}'}
    url = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']})
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F'&page={i + 2}', headers=headers).json()
            artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']})
        return artifacts
    except Exception:
        print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}')
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'Bearer {token}'}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers['''Location''']
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, F'{artifact_name}.zip')
    with open(file_path, '''wb''') as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    '''simple docstring'''
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode('''UTF-8''').strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(''': ''')]
                                    error = line[line.index(''': ''') + len(''': ''') :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith('''FAILED '''):
                                # `test` is the test method that failed
                                test = line[len('''FAILED ''') :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            F'`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` '
            F'and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
            ''' problem.''')
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    '''simple docstring'''
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith('''.zip''')]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    '''simple docstring'''
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    '''simple docstring'''
    test = test.split('''::''')[0]
    if test.startswith('''tests/models/'''):
        test = test.split('''/''')[2]
    else:
        test = None
    return test
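# e.g. "tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_config"
# maps to "albert"; tests outside tests/models/ map to None and are dropped by
# reduce_by_model below.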
def reduce_by_model(logs, error_filter=None):
    '''simple docstring'''
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {'''count''': n_errors, '''errors''': error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    '''simple docstring'''
    header = '''| no. | error | status |'''
    sep = '''|-:|:-|:-|'''
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]['''count''']
        line = F'| {count} | {error[:100]} | |'
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    '''simple docstring'''
    header = '''| model | no. of errors | major error | count |'''
    sep = '''|-:|-:|-:|-:|'''
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]['''count''']
        error, _count = list(reduced_by_model[model]['''errors'''].items())[0]
        line = F'| {model} | {count} | {error[:60]} | {_count} |'
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
                index = k.find(' / ')
                k = k[index + len(' / ') :]
            job_links[k] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
    counter = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s1)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s2)
| 3 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self):
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])
    @property
    def atol_for_validation(self):
        """simple docstring"""
        return 1e-4
| 343 | 0 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=5_1_2,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string):
        '''simple docstring'''
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f'''could not parse string as bool {string}''')
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
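# Example invocation (a sketch; the script filename and all paths below are
# placeholders, not taken from this file):
#     python convert_original_controlnet_to_diffusers.py \
#         --checkpoint_path ./control_sd15_canny.pth \
#         --original_config_file ./cldm_v15.yaml \
#         --dump_path ./controlnet-canny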
| 53 |
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    '''simple docstring'''
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
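# This is Kadane's algorithm: on [-2, 1, -3, 4, -1, 2, 1, -5, 4] the best
# subarray is [4, -1, 2, 1] with sum 6, which is what the demo below prints.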
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
| 53 | 1 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    '''simple docstring'''
    if num <= 0:
        raise ValueError('''math domain error''')
    return quad(integrand, 0, inf, args=(num))[0]
def integrand(x: float, z: float) -> float:
    '''simple docstring'''
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 56 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}
# Save pytorch-model
SCREAMING_SNAKE_CASE : Tuple = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
SCREAMING_SNAKE_CASE : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
SCREAMING_SNAKE_CASE : Optional[int] = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
print(f"Save PyTorch model to {pytorch_weights_dump_path}")
torch.save(_a , _a)
print(f"Save configuration file to {pytorch_config_dump_path}")
with open(_a , "w" , encoding="utf-8") as f:
f.write(json.dumps(_a , indent=2) + "\n")
print(f"Save vocab file to {pytorch_config_dump_path}")
with open(_a , "w" , encoding="utf-8") as f:
f.write(json.dumps(_a , indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 76 | 0 |
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 266 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wrapper around token-id sequences: checks, filters and splits them for distillation."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sub-sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
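
# A minimal usage sketch (illustrative; `params` stands for a namespace carrying the
# attributes read above: max_model_input_size, mlm, special_tok_ids and is_master):
#
#   dataset = LmSeqsDataset(params=params, data=token_id_arrays)
#   loader = torch.utils.data.DataLoader(
#       dataset, batch_size=32, collate_fn=dataset.batch_sequences
#   )
#   for token_ids, lengths in loader:
#       ...  # token_ids: (bs, max_seq_len_), lengths: (bs,)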
| 266 | 1 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
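
# Typical invocation (illustrative; the script name is assumed): each launched process
# runs `main()`, and the assertions verify gradients only sync on the expected steps.
#
#   accelerate launch --num_processes 2 test_sync.py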
| 217 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : str=1_3 , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Any=False , UpperCamelCase__ : str=True , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : Optional[int]=3_2 , UpperCamelCase__ : List[Any]=5 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : List[str]=3_7 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Dict=5_1_2 , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : Optional[int]=None , )-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Tuple = parent
__lowerCAmelCase: Optional[int] = batch_size
__lowerCAmelCase: int = seq_length
__lowerCAmelCase: Any = is_training
__lowerCAmelCase: List[Any] = use_input_mask
__lowerCAmelCase: Any = use_token_type_ids
__lowerCAmelCase: Dict = use_labels
__lowerCAmelCase: Union[str, Any] = vocab_size
__lowerCAmelCase: Union[str, Any] = hidden_size
__lowerCAmelCase: int = num_hidden_layers
__lowerCAmelCase: List[Any] = num_attention_heads
__lowerCAmelCase: int = intermediate_size
__lowerCAmelCase: Optional[Any] = hidden_act
__lowerCAmelCase: Optional[Any] = hidden_dropout_prob
__lowerCAmelCase: Optional[int] = attention_probs_dropout_prob
__lowerCAmelCase: Any = max_position_embeddings
__lowerCAmelCase: Optional[int] = type_vocab_size
__lowerCAmelCase: str = type_sequence_label_size
__lowerCAmelCase: int = initializer_range
__lowerCAmelCase: Dict = num_labels
__lowerCAmelCase: Dict = num_choices
__lowerCAmelCase: str = scope
def lowercase_ ( self : Optional[Any])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowerCAmelCase: List[Any] = None
if self.use_input_mask:
__lowerCAmelCase: int = random_attention_mask([self.batch_size, self.seq_length])
__lowerCAmelCase: Dict = None
if self.use_token_type_ids:
__lowerCAmelCase: Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__lowerCAmelCase: str = None
__lowerCAmelCase: Any = None
__lowerCAmelCase: Dict = None
if self.use_labels:
__lowerCAmelCase: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowerCAmelCase: Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__lowerCAmelCase: Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
__lowerCAmelCase: Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : str)-> Dict:
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , )
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any])-> str:
'''simple docstring'''
__lowerCAmelCase: List[Any] = OpenLlamaModel(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Union[str, Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__)
__lowerCAmelCase: int = model(UpperCamelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , )-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = True
__lowerCAmelCase: Any = OpenLlamaModel(UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: List[Any] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
__lowerCAmelCase: Optional[int] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
__lowerCAmelCase: Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowercase_ ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , )-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: str = OpenLlamaForCausalLM(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowercase_ ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , )-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = True
__lowerCAmelCase: Dict = True
__lowerCAmelCase: Union[str, Any] = OpenLlamaForCausalLM(config=UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
# first forward pass
__lowerCAmelCase: Optional[int] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
__lowerCAmelCase: Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowerCAmelCase: Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size)
__lowerCAmelCase: Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
__lowerCAmelCase: str = torch.cat([input_ids, next_tokens] , dim=-1)
__lowerCAmelCase: List[str] = torch.cat([input_mask, next_mask] , dim=-1)
__lowerCAmelCase: Union[str, Any] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["hidden_states"][0]
__lowerCAmelCase: List[str] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["hidden_states"][0]
# select random slice
__lowerCAmelCase: List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__lowerCAmelCase: List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCAmelCase: Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
def lowercase_ ( self : Union[str, Any])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__)
def lowercase_ ( self : int)-> str:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase: Union[str, Any] = type
self.model_tester.create_and_check_model(*UpperCamelCase__)
def lowercase_ ( self : Tuple)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: Dict = 3
__lowerCAmelCase: Optional[Any] = input_dict["input_ids"]
__lowerCAmelCase: Optional[Any] = input_ids.ne(1).to(UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__lowerCAmelCase: Dict = OpenLlamaForSequenceClassification(UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def lowercase_ ( self : Dict)-> Tuple:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: int = 3
__lowerCAmelCase: Dict = "single_label_classification"
__lowerCAmelCase: str = input_dict["input_ids"]
__lowerCAmelCase: Tuple = input_ids.ne(1).to(UpperCamelCase__)
__lowerCAmelCase: Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__lowerCAmelCase: List[Any] = OpenLlamaForSequenceClassification(UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def lowercase_ ( self : Optional[int])-> Any:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: Tuple = 3
__lowerCAmelCase: Any = "multi_label_classification"
__lowerCAmelCase: str = input_dict["input_ids"]
__lowerCAmelCase: Optional[int] = input_ids.ne(1).to(UpperCamelCase__)
__lowerCAmelCase: Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__lowerCAmelCase: Dict = OpenLlamaForSequenceClassification(UpperCamelCase__)
model.to(UpperCamelCase__)
model.eval()
__lowerCAmelCase: Union[str, Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
def lowercase_ ( self : Any)-> Tuple:
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)])
def lowercase_ ( self : Any , UpperCamelCase__ : List[str])-> Dict:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase: Any = ids_tensor([1, 1_0] , config.vocab_size)
__lowerCAmelCase: Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase: List[Any] = OpenLlamaModel(UpperCamelCase__)
original_model.to(UpperCamelCase__)
original_model.eval()
__lowerCAmelCase: int = original_model(UpperCamelCase__).last_hidden_state
__lowerCAmelCase: str = original_model(UpperCamelCase__).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase: Dict = {"type": scaling_type, "factor": 10.0}
__lowerCAmelCase: List[str] = OpenLlamaModel(UpperCamelCase__)
scaled_model.to(UpperCamelCase__)
scaled_model.eval()
__lowerCAmelCase: Dict = scaled_model(UpperCamelCase__).last_hidden_state
__lowerCAmelCase: Any = scaled_model(UpperCamelCase__).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5))
else:
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5))
| 217 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
def __init__( self , _lowerCamelCase , ):
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : List[str] = 13
UpperCAmelCase__ : str = 7
UpperCAmelCase__ : str = True
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : int = 2
UpperCAmelCase__ : Dict = 99
UpperCAmelCase__ : Optional[Any] = 0
UpperCAmelCase__ : int = 32
UpperCAmelCase__ : Tuple = 2
UpperCAmelCase__ : Any = 4
UpperCAmelCase__ : Union[str, Any] = 0.1
UpperCAmelCase__ : List[Any] = 0.1
UpperCAmelCase__ : Union[str, Any] = 512
UpperCAmelCase__ : Dict = 16
UpperCAmelCase__ : Optional[Any] = 2
UpperCAmelCase__ : int = 0.02
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : str = 4
UpperCAmelCase__ : str = """last"""
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Dict = 0
def snake_case__ ( self):
UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa)
UpperCAmelCase__ : List[str] = None
if self.use_input_lengths:
UpperCAmelCase__ : List[Any] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase__ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : int = None
if self.use_labels:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa)
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase__ : List[str] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : List[Any] = TFFlaubertModel(config=_lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase)
UpperCAmelCase__ : List[str] = [input_ids, input_mask]
UpperCAmelCase__ : Optional[Any] = model(_lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : Optional[int] = TFFlaubertWithLMHeadModel(_lowerCamelCase)
UpperCAmelCase__ : List[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCAmelCase__ : Dict = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : str = TFFlaubertForQuestionAnsweringSimple(_lowerCamelCase)
UpperCAmelCase__ : Tuple = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : List[Any] = TFFlaubertForSequenceClassification(_lowerCamelCase)
UpperCAmelCase__ : Any = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCAmelCase__ : str = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : List[Any] = TFFlaubertForTokenClassification(config=_lowerCamelCase)
UpperCAmelCase__ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : str = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : int = self.num_choices
UpperCAmelCase__ : int = TFFlaubertForMultipleChoice(config=_lowerCamelCase)
UpperCAmelCase__ : Tuple = tf.tile(tf.expand_dims(_lowerCamelCase , 1) , (1, self.num_choices, 1))
UpperCAmelCase__ : int = tf.tile(tf.expand_dims(_lowerCamelCase , 1) , (1, self.num_choices, 1))
UpperCAmelCase__ : Union[str, Any] = tf.tile(tf.expand_dims(_lowerCamelCase , 1) , (1, self.num_choices, 1))
UpperCAmelCase__ : List[Any] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCAmelCase__ : Tuple = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
def snake_case__ ( self):
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*_lowerCamelCase)
@slow
def snake_case__ ( self):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Dict = TFFlaubertModel.from_pretrained(_lowerCamelCase)
self.assertIsNotNone(_lowerCamelCase)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
@slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 283 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=4 , ):
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Tuple = seq_length
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : Tuple = use_attention_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : Dict = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : Optional[int] = intermediate_size
UpperCAmelCase__ : Tuple = hidden_act
UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : List[Any] = max_position_embeddings
UpperCAmelCase__ : Optional[int] = type_vocab_size
UpperCAmelCase__ : str = type_sequence_label_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : str = num_choices
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase__ : Union[str, Any] = None
if self.use_attention_mask:
UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase__ : Dict = None
if self.use_token_type_ids:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase__ : Optional[Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 283 | 1 |
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
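

# Worked example: six layers over two devices gives ceil(6 / 2) == 3 consecutive
# layers per device, which `assert_device_map` then accepts as complete and unique.
if __name__ == "__main__":
    demo_map = get_device_map(n_layers=6, devices=["cuda:0", "cuda:1"])
    assert demo_map == {"cuda:0": [0, 1, 2], "cuda:1": [3, 4, 5]}
    assert_device_map(demo_map, num_blocks=6)  # passes: no duplicate/missing/extra blocks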
| 114 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
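
# A short usage sketch (illustrative values):
#
#   task = AutomaticSpeechRecognition()
#   features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#   aligned = task.align_with_features(features)
#   assert aligned.input_schema["audio"].sampling_rate == 16_000
#   assert task.column_mapping == {"audio": "audio", "transcription": "transcription"}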
| 114 | 1 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=512,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    def parse_bool(string: str) -> bool:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
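# Example invocation (file paths and script name are hypothetical):
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors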
| 56 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
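# A minimal usage sketch (model name is illustrative; requires a CUDA-enabled
# `bitsandbytes` install): quantize every linear layer except the tied head.
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   skip = get_keys_to_not_convert(model)  # e.g. ["lm_head"]
#   model = replace_with_bnb_linear(
#       model,
#       modules_to_not_convert=skip,
#       quantization_config=BitsAndBytesConfig(load_in_8bit=True),
#   )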
| 56 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 88 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"

_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"

_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n            NOTE: Perplexity can only be calculated for causal language models.\n                    This includes models such as gpt2, causal variations of bert,\n                    causal versions of t5, and more (the full list can be found\n                    in the AutoModelForCausalLM documentation here:\n                    https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to 'cuda' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!='']\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 28 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
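# A minimal usage sketch: the default config derives its downsampling ops from
# `key_dim` and `hidden_sizes` (128 // 16 == 8).
#   >>> config = LevitConfig()
#   >>> config.down_ops[0]
#   ['Subsample', 16, 8, 4, 2, 2]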
| 365 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 118 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
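# A quick worked example of the token-level F1 above: "the" is stripped as an
# article, so gold "the cat sat" tokenizes to ["cat", "sat"] and prediction
# "cat sat down" to ["cat", "sat", "down"]; precision = 2/3, recall = 2/2,
# and F1 = 2 * (2/3) * 1 / (2/3 + 1) = 0.8.
#   >>> compute_f1("the cat sat", "cat sat down")
#   0.8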
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
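# Example invocation (file names are hypothetical):
#   python eval_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json --out-image-dir out_images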
| 122 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 122 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
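# A minimal usage sketch (default config; shapes follow `sample_size=32`,
# `in_channels=4`, `cross_attention_dim=1280`): initialize random parameters
# and run a single denoising step.
#   rng = jax.random.PRNGKey(0)
#   unet = FlaxUNet2DConditionModel()
#   params = unet.init_weights(rng)
#   sample = jnp.zeros((1, 4, 32, 32))
#   timestep = jnp.array([1], dtype=jnp.int32)
#   context = jnp.zeros((1, 77, 1280))
#   out = unet.apply({"params": params}, sample, timestep, encoder_hidden_states=context).sample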
| 357 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
        # This check we did call the fake head request
        mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 253 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
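# A quick worked example (illustrative values): the Casimir force between two
# ideal 1 m^2 plates separated by 1 micrometre.
#   >>> casimir_force(force=0, area=1.0, distance=1e-6)["force"]  # ≈ 1.3e-3 N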
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 220 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 121 | 0 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
# Converting Bytes to Megabytes
def b2mb(x):
    return int(x / 2**20)


# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase) -> List[Any]:
# Initialize accelerator
a = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config["lr"]
a = int(config["num_epochs"])
a = int(config["seed"])
a = int(config["batch_size"])
a = args.model_name_or_path
set_seed(__UpperCamelCase)
a , a = get_dataloaders(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , args.n_train , args.n_val)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase , return_dict=__UpperCamelCase)
# Instantiate optimizer
a = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
a = optimizer_cls(params=model.parameters() , lr=__UpperCamelCase)
if accelerator.state.deepspeed_plugin is not None:
a = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
a = 1
a = (len(__UpperCamelCase) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
a = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=0 , num_training_steps=__UpperCamelCase , )
else:
a = DummyScheduler(__UpperCamelCase , total_num_steps=__UpperCamelCase , warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the stating epoch so files are named properly
a = 0
# Now we train the model
a = {}
for epoch in range(__UpperCamelCase , __UpperCamelCase):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__UpperCamelCase):
a = model(**__UpperCamelCase)
a = outputs.loss
a = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin)))
a = tracemalloc.peaked + bamb(tracemalloc.begin)
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json") , "w") as f:
json.dump(__UpperCamelCase , __UpperCamelCase)
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
a = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
parser.add_argument(
"--model_name_or_path" , type=__UpperCamelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__UpperCamelCase , )
parser.add_argument(
"--output_dir" , type=__UpperCamelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=__UpperCamelCase , default=__UpperCamelCase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=__UpperCamelCase , default=3_20 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=__UpperCamelCase , default=1_60 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=__UpperCamelCase , default=1 , help="Number of train epochs." , )
a = parser.parse_args()
a = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__UpperCamelCase , __UpperCamelCase)
if __name__ == "__main__":
main()
| 180 |
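The script above switches between a real optimizer and accelerate's dummy placeholder depending on whether the DeepSpeed config already declares one. That selection logic in isolation (a sketch with a toy model; a single-process CPU run works):

```python
import torch
from torch.optim import AdamW
from accelerate import Accelerator
from accelerate.utils.deepspeed import DummyOptim

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)  # toy stand-in for the classification model

# When the DeepSpeed config already declares an optimizer, accelerate expects
# a DummyOptim placeholder; otherwise a regular torch optimizer is used.
ds_plugin = accelerator.state.deepspeed_plugin
if ds_plugin is None or "optimizer" not in ds_plugin.deepspeed_config:
    optimizer = AdamW(params=model.parameters(), lr=2e-5)
else:
    optimizer = DummyOptim(params=model.parameters(), lr=2e-5)
```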
from math import isqrt


def is_prime(number: int) -> bool:
    # trial division up to the integer square root; callers below only pass values >= 7
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(F'{solution() = }')
| 180 | 1 |
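For context: the update rule in `solution` enumerates numbers of the form (n+1)³ − n³ = 3n² + 3n + 1, the differences of consecutive cubes, and counts the prime ones. A quick self-check of that identity (not part of the source):

```python
candidate, index = 7, 1
for n in range(1, 6):
    assert candidate == (n + 1) ** 3 - n**3 == 3 * n * n + 3 * n + 1
    index += 1
    candidate += 6 * index
```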
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCAmelCase__ = get_logger(__name__)
def snake_case_ ( A_ : Dict, A_ : Optional[Any], A_ : List[str], A_ : Tuple, A_ : List[Any]=0 ):
'''simple docstring'''
os.makedirs(A_, exist_ok=A_ )
with FSDP.state_dict_type(
A_, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
_lowerCamelCase : Any = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_lowerCamelCase : Union[str, Any] = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
_lowerCamelCase : Tuple = os.path.join(A_, A_ )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(A_, A_ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_lowerCamelCase : Optional[Any] = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
_lowerCamelCase : int = os.path.join(A_, A_ )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(A_, A_ )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_lowerCamelCase : List[Any] = os.path.join(A_, F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(A_, exist_ok=A_ )
logger.info(F'''Saving model to {ckpt_dir}''' )
_lowerCamelCase : Tuple = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=A_, storage_writer=dist_cp.FileSystemWriter(A_ ), planner=DefaultSavePlanner(), )
logger.info(F'''Model saved to {ckpt_dir}''' )
def snake_case_ ( A_ : Optional[int], A_ : Union[str, Any], A_ : List[Any], A_ : Union[str, Any], A_ : Optional[int]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A_, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A_ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
_lowerCamelCase : int = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
_lowerCamelCase : List[Any] = os.path.join(A_, A_ )
logger.info(F'''Loading model from {input_model_file}''' )
_lowerCamelCase : Optional[int] = torch.load(A_ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_lowerCamelCase : Any = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
_lowerCamelCase : Tuple = os.path.join(A_, A_ )
logger.info(F'''Loading model from {input_model_file}''' )
_lowerCamelCase : Any = torch.load(A_ )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_lowerCamelCase : List[str] = (
os.path.join(A_, F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
_lowerCamelCase : Tuple = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=A_, storage_reader=dist_cp.FileSystemReader(A_ ), planner=DefaultLoadPlanner(), )
_lowerCamelCase : Optional[int] = state_dict['''model''']
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(A_ )
def snake_case_ ( A_ : Any, A_ : Union[str, Any], A_ : int, A_ : Optional[Any], A_ : int, A_ : Tuple=0 ):
'''simple docstring'''
os.makedirs(A_, exist_ok=A_ )
with FSDP.state_dict_type(
A_, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
_lowerCamelCase : int = FSDP.optim_state_dict(A_, A_ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
_lowerCamelCase : Any = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
_lowerCamelCase : List[Any] = os.path.join(A_, A_ )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(A_, A_ )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
_lowerCamelCase : Dict = os.path.join(A_, F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(A_, exist_ok=A_ )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state}, storage_writer=dist_cp.FileSystemWriter(A_ ), planner=DefaultSavePlanner(), )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def snake_case_ ( A_ : Tuple, A_ : Dict, A_ : Any, A_ : Optional[int], A_ : List[str], A_ : List[Any]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A_, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_lowerCamelCase : Any = None
# the check below should work but currently doesn't (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
_lowerCamelCase : Dict = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
_lowerCamelCase : Any = os.path.join(A_, A_ )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
_lowerCamelCase : List[Any] = torch.load(A_ )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
_lowerCamelCase : List[str] = (
os.path.join(A_, F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
_lowerCamelCase : int = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict(), optimizer_key='''optimizer''', storage_reader=dist_cp.FileSystemReader(A_ ), )
_lowerCamelCase : List[str] = optim_state['''optimizer''']
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
_lowerCamelCase : List[str] = FSDP.optim_state_dict_to_load(A_, A_, A_ )
optimizer.load_state_dict(A_ )
| 72 |
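A minimal sketch, separate from the helpers above, of the FULL_STATE_DICT flow they rely on: gather the complete unsharded state dict (offloaded to CPU, materialized only on rank 0) and save it from that rank. It assumes an initialized process group and an FSDP-wrapped model:

```python
import torch
from torch.distributed.fsdp import FullStateDictConfig, StateDictType
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP


def save_full_state_dict(model, path, rank):
    # Offload the gathered tensors to CPU and keep them only on rank 0
    # to bound GPU memory during the all-gather.
    cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, cfg):
        state = model.state_dict()
    if rank == 0:
        torch.save(state, path)
```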
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( _lowercase ):
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """depth_multiplier""" ) )
class _lowercase :
def __init__( self: str , UpperCamelCase__: Dict , UpperCamelCase__: Tuple=13 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Optional[Any]=0.25 , UpperCamelCase__: int=8 , UpperCamelCase__: Any=True , UpperCamelCase__: Dict=1_024 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Tuple="relu6" , UpperCamelCase__: int=0.1 , UpperCamelCase__: List[Any]=0.02 , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: Union[str, Any]=10 , UpperCamelCase__: str=None , ):
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : Optional[Any] = depth_multiplier
lowerCamelCase__ : Union[str, Any] = min_depth
lowerCamelCase__ : Optional[Any] = tf_padding
lowerCamelCase__ : str = int(last_hidden_size * depth_multiplier )
lowerCamelCase__ : Any = output_stride
lowerCamelCase__ : int = hidden_act
lowerCamelCase__ : Tuple = classifier_dropout_prob
lowerCamelCase__ : Dict = use_labels
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : Optional[Any] = scope
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Dict = None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: str ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Any , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : List[str] = MobileNetVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[str] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: str , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Optional[Any] = MobileNetVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = config_and_inputs
lowerCamelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
a = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[int] = MobileNetVaModelTester(self )
lowerCamelCase__ : List[str] = MobileNetVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def lowerCamelCase_ ( self: Optional[Any] ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
def check_hidden_states_output(UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: List[Any] ):
lowerCamelCase__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : List[Any] = outputs.hidden_states
lowerCamelCase__ : Tuple = 26
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Optional[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: List[str] ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = MobileNetVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Union[str, Any]:
lowerCamelCase__ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Optional[int] ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : List[Any] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(UpperCamelCase__ )
lowerCamelCase__ : Dict = self.default_image_processor
lowerCamelCase__ : int = prepare_img()
lowerCamelCase__ : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : str = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : List[str] = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : List[str] = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 41 | 0 |
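The tester above encodes MobileNetV1's shape arithmetic: channel width scales with `depth_multiplier`, spatial size shrinks by `output_stride`. With the tester's default values, restated as plain numbers:

```python
batch_size, image_size, output_stride = 13, 32, 32
last_hidden_size, depth_multiplier = 1024, 0.25

expected_shape = (
    batch_size,
    int(last_hidden_size * depth_multiplier),
    image_size // output_stride,
    image_size // output_stride,
)
print(expected_shape)  # (13, 256, 1, 1)
```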
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def __UpperCamelCase ( _lowerCAmelCase ) -> int:
"""simple docstring"""
return EnvironmentCommand()
def __UpperCamelCase ( _lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
return EnvironmentCommand(args.accelerate_config_file )
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@staticmethod
def _lowerCAmelCase ( lowerCamelCase__ ):
A : Optional[Any] = parser.add_parser("""env""" )
download_parser.set_defaults(func=lowerCamelCase__ )
download_parser.add_argument(
"""--accelerate-config_file""", default=lowerCamelCase__, help="""The accelerate config file to use for the default values in the launching script.""", )
download_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self, lowerCamelCase__, *lowerCamelCase__ ):
A : str = accelerate_config_file
def _lowerCAmelCase ( self ):
A : Dict = """not installed"""
if is_safetensors_available():
import safetensors
A : str = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A : List[str] = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
A : List[str] = """not installed"""
A : Any = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A : Optional[Any] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowerCamelCase__ ):
A : List[Any] = load_config_from_file(self._accelerate_config_file ).to_dict()
A : Tuple = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(lowerCamelCase__, lowerCamelCase__ )
else f'''\t{accelerate_config}'''
)
A : str = """not installed"""
A : List[str] = """NA"""
if is_torch_available():
import torch
A : int = torch.__version__
A : Dict = torch.cuda.is_available()
A : str = """not installed"""
A : Optional[Any] = """NA"""
if is_tf_available():
import tensorflow as tf
A : Union[str, Any] = tf.__version__
try:
# deprecated in v2.1
A : int = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A : Tuple = bool(tf.config.list_physical_devices("""GPU""" ) )
A : List[str] = """not installed"""
A : List[Any] = """not installed"""
A : List[Any] = """not installed"""
A : List[Any] = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A : Union[str, Any] = flax.__version__
A : Any = jax.__version__
A : Tuple = jaxlib.__version__
A : Any = jax.lib.xla_bridge.get_backend().platform
A : int = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(lowerCamelCase__ ) )
return info
@staticmethod
def _lowerCAmelCase ( lowerCamelCase__ ):
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 115 |
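The env command probes each soft dependency the same way: use the library's availability helper when present, fall back to `importlib.util.find_spec`, and report "not installed" otherwise. The core of that pattern, standalone:

```python
import importlib.util

safetensors_version = "not installed"
if importlib.util.find_spec("safetensors") is not None:
    import safetensors

    safetensors_version = safetensors.__version__
print(safetensors_version)
```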
import os
from pathlib import Path
def load_cuda_kernels():
    """simple docstring"""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]
    # with_cuda=True is assumed here; the anonymized call passed an undefined name
    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 115 | 1 |
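Compiling these kernels needs a CUDA toolchain at runtime, so a guarded call is the usual pattern. A hypothetical usage sketch (not from the source):

```python
import torch

if torch.cuda.is_available():
    # torch.utils.cpp_extension.load JIT-compiles on first use and caches the build.
    MSDA = load_cuda_kernels()
```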
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 1_6
lowerCAmelCase_ = 3_2
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase = 16 ) -> List[str]:
"""simple docstring"""
snake_case_ : Any = AutoTokenizer.from_pretrained('''bert-base-cased''' )
snake_case_ : List[Any] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(_UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ : Any = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_UpperCamelCase , max_length=_UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ : List[str] = datasets.map(
_UpperCamelCase , batched=_UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ : str = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ : Dict = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ : str = 16
elif accelerator.mixed_precision != "no":
snake_case_ : str = 8
else:
snake_case_ : Dict = None
return tokenizer.pad(
_UpperCamelCase , padding='''longest''' , max_length=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
snake_case_ : int = DataLoader(
tokenized_datasets['''train'''] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
snake_case_ : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _UpperCamelCase ) == "1":
snake_case_ : List[str] = 2
# New Code #
snake_case_ : Any = int(args.gradient_accumulation_steps )
snake_case_ : Dict = int(args.local_sgd_steps )
# Initialize accelerator
snake_case_ : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCamelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : str = config['''lr''']
snake_case_ : List[str] = int(config['''num_epochs'''] )
snake_case_ : Optional[Any] = int(config['''seed'''] )
snake_case_ : List[Any] = int(config['''batch_size'''] )
snake_case_ : Optional[int] = evaluate.load('''glue''' , '''mrpc''' )
set_seed(_UpperCamelCase )
snake_case_ , snake_case_ : List[Any] = get_dataloaders(_UpperCamelCase , _UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ : Tuple = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ : List[str] = AdamW(params=model.parameters() , lr=_UpperCamelCase )
# Instantiate scheduler
snake_case_ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : int = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Now we train the model
for epoch in range(_UpperCamelCase ):
model.train()
with LocalSGD(
accelerator=_UpperCamelCase , model=_UpperCamelCase , local_sgd_steps=_UpperCamelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also do not currently support, or advise using, TPUs here, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_UpperCamelCase ):
snake_case_ : Any = model(**_UpperCamelCase )
snake_case_ : int = output.loss
accelerator.backward(_UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : List[Any] = model(**_UpperCamelCase )
snake_case_ : Dict = outputs.logits.argmax(dim=-1 )
snake_case_ , snake_case_ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=_UpperCamelCase , references=_UpperCamelCase , )
snake_case_ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , _UpperCamelCase )
def lowerCamelCase_ ( ) -> int:
"""simple docstring"""
snake_case_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=_UpperCamelCase , default=_UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_UpperCamelCase , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument(
'''--local_sgd_steps''' , type=_UpperCamelCase , default=8 , help='''Number of local SGD steps or None to disable local SGD''' )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
snake_case_ : Dict = parser.parse_args()
snake_case_ : Any = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
main()
| 279 |
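A condensed sketch of the LocalSGD training pattern above, with a toy model and random data instead of the GLUE pipeline (a single-process run is fine for illustration; LocalSGD then degrades to a no-op):

```python
import torch
from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)  # toy stand-in for the transformer
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(4)]
with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for x, y in batches:
        with accelerator.accumulate(model):
            loss = torch.nn.functional.mse_loss(model(x), y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        # Synchronizes (averages) parameters across workers every `local_sgd_steps` steps.
        local_sgd.step()
```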
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 279 | 1 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str) -> None:
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM'])
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin'))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.'):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.'):]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict)
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case__ = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 4 |
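The heart of the conversion above is a mechanical key rename plus dropping unused LayerNorm weights. The rename, shown on a fake state dict:

```python
state = {"roberta.embeddings.weight": 0, "lm_head.bias": 1}
converted = {}
for key, value in state.items():
    if key.startswith("roberta."):
        key = "roberta_prelayernorm." + key[len("roberta."):]
    converted[key] = value
print(sorted(converted))  # ['lm_head.bias', 'roberta_prelayernorm.embeddings.weight']
```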
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case__ = logging.getLogger(__name__)
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
@dataclass(frozen=a__ )
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : int = os.path.join(
_lowerCamelCase , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , )
A_ : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
A_ : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A_ : str = cached_features_file + '''.lock'''
with FileLock(_lowerCamelCase ):
if os.path.exists(_lowerCamelCase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
A_ : List[str] = torch.load(_lowerCamelCase )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
A_ : Optional[int] = (
processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
)
logger.info('''Training examples: %s''' , len(_lowerCamelCase ) )
A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info('''Saving features into cached file %s''' , _lowerCamelCase )
torch.save(self.features , _lowerCamelCase )
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
return self.features[i]
def _a ( self : str ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ :
"""simple docstring"""
_lowerCAmelCase = 42
def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ):
"""simple docstring"""
A_ : Optional[int] = hans_processors[task]()
A_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
A_ : Tuple = label_list
A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase )
A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
A_ : List[Any] = tf.data.Dataset.from_generator(
_lowerCamelCase , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _a ( self : Any ):
"""simple docstring"""
return self.dataset
def __len__( self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ):
"""simple docstring"""
return self.features[i]
def _a ( self : Tuple ):
"""simple docstring"""
return self.label_list
class UpperCamelCase_ (a__ ):
"""simple docstring"""
def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _a ( self : List[str] , _lowerCamelCase : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _a ( self : Any ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : Tuple = []
for i, line in enumerate(_lowerCamelCase ):
if i == 0:
continue
A_ : str = '''%s-%s''' % (set_type, line[0])
A_ : Optional[Any] = line[5]
A_ : Union[str, Any] = line[6]
A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
A_ : str = line[0]
examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) )
return examples
def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int:
A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )}
A_ : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d''' % (ex_index) )
A_ : Optional[int] = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , )
A_ : List[str] = label_map[example.label] if example.label in label_map else 0
A_ : Tuple = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
snake_case__ = {
"""hans""": 3,
}
snake_case__ = {
"""hans""": HansProcessor,
}
| 4 | 1 |
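The RoBERTa label-swap hack above, in isolation: entries 1 and 2 of the label list are exchanged for RoBERTa-family tokenizers, because those pretrained checkpoints use a different label order:

```python
label_list = ["contradiction", "entailment", "neutral"]
label_list[1], label_list[2] = label_list[2], label_list[1]
print(label_list)  # ['contradiction', 'neutral', 'entailment']
```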
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Optional[Any] =logging.get_logger(__name__)
a__ : Optional[Any] ={
'''nvidia/segformer-b0-finetuned-ade-512-512''': (
'''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'''
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int ="segformer"
def __init__( self : Optional[Any] , __A : Union[str, Any]=3 , __A : List[str]=4 , __A : Tuple=[2, 2, 2, 2] , __A : str=[8, 4, 2, 1] , __A : List[Any]=[3_2, 6_4, 1_6_0, 2_5_6] , __A : str=[7, 3, 3, 3] , __A : int=[4, 2, 2, 2] , __A : List[Any]=[1, 2, 5, 8] , __A : int=[4, 4, 4, 4] , __A : List[Any]="gelu" , __A : str=0.0 , __A : Tuple=0.0 , __A : Union[str, Any]=0.1 , __A : str=0.02 , __A : Any=0.1 , __A : Dict=1e-6 , __A : Optional[Any]=2_5_6 , __A : Optional[int]=2_5_5 , **__A : List[str] , ):
super().__init__(**__A )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , __A , )
__UpperCamelCase = num_channels
__UpperCamelCase = num_encoder_blocks
__UpperCamelCase = depths
__UpperCamelCase = sr_ratios
__UpperCamelCase = hidden_sizes
__UpperCamelCase = patch_sizes
__UpperCamelCase = strides
__UpperCamelCase = mlp_ratios
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = classifier_dropout_prob
__UpperCamelCase = initializer_range
__UpperCamelCase = drop_path_rate
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = decoder_hidden_size
__UpperCamelCase = kwargs.get('reshape_last_stage' , __A )
__UpperCamelCase = semantic_loss_ignore_index
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple =version.parse("1.11" )
@property
def _lowerCamelCase ( self : List[str] ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _lowerCamelCase ( self : Optional[Any] ):
return 1e-4
@property
def _lowerCamelCase ( self : Tuple ):
return 1_2
| 53 |
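Hypothetical usage of the config above; the defaults describe the MiT-b0 backbone:

```python
from transformers import SegformerConfig

config = SegformerConfig()
print(config.num_encoder_blocks, config.hidden_sizes, config.decoder_hidden_size)
# 4 [32, 64, 160, 256] 256
```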
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    # Compare the protos with names blanked out, then restore them.
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # Rewire every node that consumed the removed initializer.
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 / DOUBLE
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
| 53 | 1 |
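The hard-coded byte sizes in `remove_dup_initializers` correspond to ONNX TensorProto dtype codes. In recent onnx releases the same mapping is available through a helper (shown as a cross-check, not part of the source):

```python
import onnx
from onnx import TensorProto

for code in (TensorProto.FLOAT, TensorProto.INT32, TensorProto.INT64, TensorProto.DOUBLE):
    print(code, onnx.helper.tensor_dtype_to_np_dtype(code).itemsize)
# prints: 1 4 / 6 4 / 7 8 / 11 8
```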
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
__a = '__DUMMY_TRANSFORMERS_USER__'
__a = 'Dummy User'
__a = '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'
__a = 'https://hub-ci.huggingface.co'
__a = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
__a = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
__a = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def __UpperCAmelCase ( a_: Optional[int] ):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", a_ )
@pytest.fixture
def __UpperCAmelCase ( a_: List[str] ):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", a_ )
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", a_ )
@pytest.fixture
def __UpperCAmelCase ( a_: str ):
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", a_ )
@pytest.fixture
def __UpperCAmelCase ( a_: str, a_: Any ):
HfFolder.save_token(a_ )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( ):
return HfApi(endpoint=a_ )
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: HfApi ):
previous_token = HfFolder.get_token()
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield CI_HUB_USER_TOKEN
if previous_token is not None:
    HfFolder.save_token(previous_token)
@pytest.fixture
def __UpperCAmelCase ( a_: List[str] ):
def _cleanup_repo(a_: Dict ):
hf_api.delete_repo(a_, token=a_, repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def __UpperCAmelCase ( a_: List[Any] ):
@contextmanager
def _temporary_repo(a_: str ):
try:
yield repo_id
finally:
cleanup_repo(a_ )
return _temporary_repo
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: HfApi, a_: List[str], a_: Optional[Any] ):
_UpperCAmelCase : Optional[Any] = f"""repo_txt_data-{int(time.time() * 1_0e3 )}"""
_UpperCAmelCase : Optional[Any] = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(a_, token=a_, repo_type="dataset", private=a_ )
hf_api.upload_file(
token=a_, path_or_fileobj=str(a_ ), path_in_repo="data/text_data.txt", repo_id=a_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(a_, token=a_, repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __UpperCAmelCase ( a_: List[str], a_: Dict, a_: Union[str, Any] ):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: HfApi, a_: Union[str, Any], a_: Optional[int] ):
_UpperCAmelCase : Dict = f"""repo_zipped_txt_data-{int(time.time() * 1_0e3 )}"""
_UpperCAmelCase : int = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(a_, token=a_, repo_type="dataset", private=a_ )
hf_api.upload_file(
token=a_, path_or_fileobj=str(a_ ), path_in_repo="data.zip", repo_id=a_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(a_, token=a_, repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __UpperCAmelCase ( a_: int, a_: Optional[Any], a_: Tuple ):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def __UpperCAmelCase ( a_: HfApi, a_: Optional[Any], a_: Any ):
_UpperCAmelCase : Tuple = f"""repo_zipped_img_data-{int(time.time() * 1_0e3 )}"""
_UpperCAmelCase : Optional[int] = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(a_, token=a_, repo_type="dataset", private=a_ )
hf_api.upload_file(
token=a_, path_or_fileobj=str(a_ ), path_in_repo="data.zip", repo_id=a_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(a_, token=a_, repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __UpperCAmelCase ( a_: List[Any], a_: List[Any], a_: str ):
return hf_private_dataset_repo_zipped_img_data_
| 17 |
'''simple docstring'''
from math import factorial


def solution(num: int = 100) -> int:
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
| 17 | 1 |
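A cross-check of the solution above (648 is the published answer for the digit sum of 100!):

```python
from math import factorial

assert sum(int(digit) for digit in str(factorial(100))) == 648
```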
"""simple docstring"""
def xnor_gate(input_a: int, input_b: int) -> int:
    '''simple docstring'''
    return 1 if input_a == input_b else 0


def test_xnor_gate() -> None:
    '''simple docstring'''
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 84 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Optional[Any] = "data2vec-text"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase_ :List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase_ :List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
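# Example usage (a hedged addition for illustration, not part of the original
# module; the names follow the classes as fixed above):
#
#     config = Data2VecTextConfig(num_hidden_layers=6)
#     config.num_hidden_layers  # -> 6; every other field keeps the defaults above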
| 84 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }
    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
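# Example usage (a hedged addition, not part of the original module): build the
# decoder config above and show how attribute_map aliases resolve.
#
#     config = TrOCRConfig(decoder_layers=6)
#     config.num_hidden_layers  # -> 6, resolved through attribute_map
| 363 |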
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = HubertForCTC(config)
    else:
        hf_wavavec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
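# Example invocation (a hedged addition; the script filename is an assumption,
# the flags are the ones registered by the argparse block below):
#
#     python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./hubert_base_ls960.pt \
#         --pytorch_dump_folder_path ./hubert-base \
#         --not_finetuned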
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 171 | 0 |
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 244 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker=True, ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
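# Hedged usage sketch (an addition, not part of the original file): the loading
# pattern below is an assumption about typical community-pipeline use.
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     output = pipe("an astronaut riding a horse")
#     images = output.images  # one image per checkpoint v1.1-v1.4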
| 244 | 1 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    '''simple docstring'''
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)
    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
args = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
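# Example invocation (a hedged addition; the script filename and checkpoint
# location are assumptions, the flags are the ones registered above):
#
#     python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#         --gpt2_checkpoint_path ./models/117M/model.ckpt \
#         --pytorch_dump_folder_path ./gpt2-pytorch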
| 277 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_pythia_410m_deduped(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]
            self.assertEqual(output_str, expected_output)
| 277 | 1 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    '''simple docstring'''
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    config = MaskFormerConfig(backbone_config=backbone_config)
    repo_id = 'huggingface/label-files'
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = 'maskformer-ade20k-full-id2label.json'
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = 'ade20k-id2label.json'
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = 'maskformer-coco-stuff-id2label.json'
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = 'coco-panoptic-id2label.json'
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = 'cityscapes-id2label.json'
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = 'mapillary-vistas-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    '''simple docstring'''
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> torch.Tensor:
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    '''simple docstring'''
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, 'rb') as f:
        data = pickle.load(f)
    state_dict = data['model']
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if 'ade' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors='pt')
    outputs = model(**inputs)
    print('Logits:', outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model and image processor to the hub...')
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
    help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
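# Example invocation (a hedged addition; the script filename is an assumption,
# the flags and defaults are the ones registered above):
#
#     python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path ./MaskFormer-Swin-tiny-ADE20k/model.pkl \
#         --pytorch_dump_folder_path ./maskformer-swin-tiny-ade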
| 56 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    '''simple docstring'''
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 56 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class CpmTokenizer(PreTrainedTokenizer):
'''simple docstring'''
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        """simple docstring"""
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
                'See https://pypi.org/project/jieba/ for installation.' )
        self.jieba = jieba
        self.translator = str.maketrans(' \n', '\u2582\u2583')
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
return len(self.sp_model )
def __lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
a = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : List[str] , __UpperCAmelCase : Optional[int] ) ->str:
"""simple docstring"""
a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str] ) ->List[str]:
"""simple docstring"""
if self.remove_space:
a = ''' '''.join(inputs.strip().split() )
else:
a = inputs
a = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
a = unicodedata.normalize('''NFKD''' , __UpperCAmelCase )
a = ''''''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
a = outputs.lower()
return outputs
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
a = self.preprocess_text(__UpperCAmelCase )
a = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
a = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
a = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a = cur_pieces[1:]
else:
a = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Any ) ->Any:
"""simple docstring"""
return self.sp_model.PieceToId(__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Dict ) ->Union[str, Any]:
"""simple docstring"""
return self.sp_model.IdToPiece(__UpperCAmelCase )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
a = ''''''.join(__UpperCAmelCase ).replace(__UpperCAmelCase , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def __lowerCAmelCase ( self : Any , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = super()._decode(*__UpperCAmelCase , **__UpperCAmelCase )
a = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
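# Hedged sketch (standalone, illustrative strings): the whitespace encoding the
# tokenizer above relies on. ' ' and '\n' are mapped to '\u2582'/'\u2583' before
# sentencepiece sees the text, and _decode above reverses the mapping.
translator = str.maketrans(' \n', '\u2582\u2583')
encoded = 'hello world\nbye'.translate(translator)
assert encoded == 'hello\u2582world\u2583bye'
decoded = encoded.replace(' ', '').replace('\u2582', ' ').replace('\u2583', '\n')
assert decoded == 'hello world\nbye'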
| 26 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : UNetaDModel , __UpperCAmelCase : UNetaDModel , __UpperCAmelCase : DDPMScheduler , __UpperCAmelCase : Optional[int] , ) ->List[str]:
"""simple docstring"""
super().__init__()
a = value_function
a = unet
a = scheduler
a = env
a = env.get_dataset()
a = {}
for key in self.data.keys():
try:
a = self.data[key].mean()
except: # noqa: E722
pass
a = {}
for key in self.data.keys():
try:
a = self.data[key].std()
except: # noqa: E722
pass
a = env.observation_space.shape[0]
a = env.action_space.shape[0]
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ) ->Dict:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) ->List[str]:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def __lowerCAmelCase ( self : int , __UpperCAmelCase : int ) ->List[str]:
"""simple docstring"""
if type(__UpperCAmelCase ) is dict:
return {k: self.to_torch(__UpperCAmelCase ) for k, v in x_in.items()}
elif torch.is_tensor(__UpperCAmelCase ):
return x_in.to(self.unet.device )
return torch.tensor(__UpperCAmelCase , device=self.unet.device )
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple ) ->int:
"""simple docstring"""
for key, val in cond.items():
a = val.clone()
return x_in
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = x.shape[0]
a = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
a = torch.full((batch_size,) , __UpperCAmelCase , device=self.unet.device , dtype=torch.long )
for _ in range(__UpperCAmelCase ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
a = self.value_function(x.permute(0 , 2 , 1 ) , __UpperCAmelCase ).sample
a = torch.autograd.grad([y.sum()] , [x] )[0]
a = self.scheduler._get_variance(__UpperCAmelCase )
a = torch.exp(0.5 * posterior_variance )
a = model_std * grad
a = 0
a = x.detach()
a = x + scale * grad
a = self.reset_xa(__UpperCAmelCase , __UpperCAmelCase , self.action_dim )
a = self.unet(x.permute(0 , 2 , 1 ) , __UpperCAmelCase ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
a = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , predict_epsilon=__UpperCAmelCase )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
a = self.reset_xa(__UpperCAmelCase , __UpperCAmelCase , self.action_dim )
a = self.to_torch(__UpperCAmelCase )
return x, y
def __call__( self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]=64 , __UpperCAmelCase : int=32 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : str=0.1 ) ->List[str]:
"""simple docstring"""
a = self.normalize(__UpperCAmelCase , '''observations''' )
a = obs[None].repeat(__UpperCAmelCase , axis=0 )
a = {0: self.to_torch(__UpperCAmelCase )}
a = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
a = randn_tensor(__UpperCAmelCase , device=self.unet.device )
a = self.reset_xa(__UpperCAmelCase , __UpperCAmelCase , self.action_dim )
a = self.to_torch(__UpperCAmelCase )
# run the diffusion process
a , a = self.run_diffusion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# sort output trajectories by value
a = y.argsort(0 , descending=__UpperCAmelCase ).squeeze()
a = x[sorted_idx]
a = sorted_values[:, :, : self.action_dim]
a = actions.detach().cpu().numpy()
a = self.de_normalize(__UpperCAmelCase , key='''actions''' )
# select the action with the highest value
if y is not None:
a = 0
else:
# if we didn't run value guiding, select a random action
a = np.random.randint(0 , __UpperCAmelCase )
a = denorm_actions[selected_index, 0]
return denorm_actions
| 26 | 1 |
A__ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
A__ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _UpperCAmelCase ( snake_case , snake_case , snake_case ):
"""simple docstring"""
_lowerCAmelCase = True
_lowerCAmelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(snake_case , snake_case , snake_case )
order.append(snake_case )
return order
def _UpperCAmelCase ( snake_case , snake_case , snake_case ):
"""simple docstring"""
_lowerCAmelCase = True
_lowerCAmelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(snake_case , snake_case , snake_case )
return component
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = len(snake_case ) * [False]
_lowerCAmelCase = {vert: [] for vert in range(len(snake_case ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(snake_case )
_lowerCAmelCase = []
for i, was_visited in enumerate(snake_case ):
if not was_visited:
order += topology_sort(snake_case , snake_case , snake_case )
_lowerCAmelCase = []
_lowerCAmelCase = len(snake_case ) * [False]
for i in range(len(snake_case ) ):
_lowerCAmelCase = order[len(snake_case ) - i - 1]
if not visited[vert]:
_lowerCAmelCase = find_components(snake_case , snake_case , snake_case )
components_list.append(snake_case )
return components_list
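# Hedged sketch (standalone): a readable restatement of the Kosaraju procedure
# implemented above: a post-order DFS on the graph, then DFS on the reversed
# graph in decreasing finish order. Names here are illustrative.
def kosaraju_scc(graph):
    n = len(graph)
    reversed_graph = {v: [] for v in range(n)}
    for u, neighbours in graph.items():
        for v in neighbours:
            reversed_graph[v].append(u)

    visited = [False] * n
    order = []

    def dfs_order(u):  # first pass: record post-order finish times
        visited[u] = True
        for v in graph[u]:
            if not visited[v]:
                dfs_order(v)
        order.append(u)

    for u in range(n):
        if not visited[u]:
            dfs_order(u)

    visited = [False] * n
    components = []

    def dfs_collect(u, component):  # second pass: walk the reversed graph
        visited[u] = True
        component.append(u)
        for v in reversed_graph[u]:
            if not visited[v]:
                dfs_collect(v, component)

    for u in reversed(order):  # highest finish time first
        if not visited[u]:
            component = []
            dfs_collect(u, component)
            components.append(component)
    return components


assert kosaraju_scc({0: [1], 1: [2], 2: [0, 3], 3: []}) == [[0, 2, 1], [3]]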
| 82 |
import random
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = a[left_index]
snake_case = left_index + 1
for j in range(left_index + 1 ,UpperCamelCase_ ):
if a[j] < pivot:
snake_case , snake_case = a[i], a[j]
i += 1
snake_case , snake_case = a[i - 1], a[left_index]
return i - 1
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
if left < right:
snake_case = random.randint(UpperCamelCase_ ,right - 1 )
snake_case , snake_case = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
snake_case = partition(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
quick_sort_random(
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
UpperCamelCase_ ,pivot_index + 1 ,UpperCamelCase_ ) # recursive quicksort to the right of the pivot point
def UpperCAmelCase__ ():
"""simple docstring"""
snake_case = input('''Enter numbers separated by a comma:\n''' ).strip()
snake_case = [int(UpperCamelCase_ ) for item in user_input.split(''',''' )]
quick_sort_random(UpperCamelCase_ ,0 ,len(UpperCamelCase_ ) )
print(UpperCamelCase_ )
if __name__ == "__main__":
main()
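# Hedged note (standalone sketch, names are illustrative): the random pivot in
# the sort above avoids the quadratic behaviour a fixed left pivot shows on
# already-sorted input. This sketch counts element comparisons for both pivot
# choices on sorted data; expect roughly n^2/2 versus n*log(n) of them.
import random
import sys


def _quick_sort_counted(a, left, right, randomized, stats):
    if left >= right - 1:
        return
    pivot_index = random.randint(left, right - 1) if randomized else left
    a[left], a[pivot_index] = a[pivot_index], a[left]
    pivot, i = a[left], left + 1
    for j in range(left + 1, right):
        stats[0] += 1  # one comparison against the pivot
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left] = a[left], a[i - 1]
    _quick_sort_counted(a, left, i - 1, randomized, stats)
    _quick_sort_counted(a, i, right, randomized, stats)


def _compare_pivot_choices():
    sys.setrecursionlimit(5_000)  # the fixed pivot recurses ~n deep on sorted input
    for randomized in (False, True):
        data, stats = list(range(1_000)), [0]
        _quick_sort_counted(data, 0, len(data), randomized, stats)
        assert data == sorted(data)
        print('randomized' if randomized else 'fixed pivot', 'comparisons:', stats[0])


# e.g. _compare_pivot_choices() prints:
#   fixed pivot comparisons: 499500
#   randomized comparisons: ~13000 (varies with the random choices)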
| 127 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 38 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def lowercase (SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
if "model" in sd.keys():
        SCREAMING_SNAKE_CASE = sd['model']  # reuse the dict loaded above instead of reading the checkpoint twice
# pop unnecessary weights
SCREAMING_SNAKE_CASE = [
'decoder.version',
'decoder.output_projection.weight',
]
for key in keys_to_delete:
if key in sd:
sd.pop(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = {
'decoder.project_in_dim.weight': 'decoder.project_in.weight',
'decoder.project_out_dim.weight': 'decoder.project_out.weight',
'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE = sd.pop(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE = sd[key]
# We split QKV in separate Q,K,V
SCREAMING_SNAKE_CASE = key.replace('.qkv_proj.' , '.q_proj.' )
SCREAMING_SNAKE_CASE = key.replace('.qkv_proj.' , '.k_proj.' )
SCREAMING_SNAKE_CASE = key.replace('.qkv_proj.' , '.v_proj.' )
SCREAMING_SNAKE_CASE = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
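            # Shape sketch (illustrative): for hidden size h the fused qkv weight is (3*h, h),
            # so depth // 3 == h and each chunk produced by the split below is an (h, h) matrix.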
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.split(SCREAMING_SNAKE_CASE_ , depth // 3 , dim=0 )
SCREAMING_SNAKE_CASE = q
SCREAMING_SNAKE_CASE = k
SCREAMING_SNAKE_CASE = v
del sd[key]
return sd
@torch.no_grad()
def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int]=None ) -> List[Any]:
SCREAMING_SNAKE_CASE = load_checkpoint(SCREAMING_SNAKE_CASE_ )
if config is not None:
SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE = OPTConfig()
SCREAMING_SNAKE_CASE = OPTModel(SCREAMING_SNAKE_CASE_ ).half().eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check results
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
__UpperCamelCase = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 38 | 1 |
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 114 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a : List[str] = "src/diffusers"
a : str = "."
# This is to make sure the diffusers module imported is the one in the repo.
a : Tuple = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
a : List[str] = spec.loader.load_module()
def lowerCamelCase__ ( __lowerCamelCase : Dict , __lowerCamelCase : Tuple ):
return line.startswith(__lowerCamelCase ) or len(__lowerCamelCase ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , __lowerCamelCase ) is not None
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : Optional[int] = object_name.split(""".""" )
__UpperCAmelCase : List[Any] = 0
# First let's find the module where our object lives.
__UpperCAmelCase : Optional[Any] = parts[i]
while i < len(__lowerCamelCase ) and not os.path.isfile(os.path.join(__lowerCamelCase , f"""{module}.py""" ) ):
i += 1
if i < len(__lowerCamelCase ):
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , parts[i] )
if i >= len(__lowerCamelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__lowerCamelCase , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__UpperCAmelCase : Optional[Any] = f.readlines()
# Now let's find the class / func in the code!
__UpperCAmelCase : List[str] = """"""
__UpperCAmelCase : int = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCamelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
__UpperCAmelCase : List[str] = line_index
while line_index < len(__lowerCamelCase ) and _should_continue(lines[line_index] , __lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__UpperCAmelCase : Dict = lines[start_index:line_index]
return "".join(__lowerCamelCase )
a : Any = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
a : Optional[int] = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
a : Dict = re.compile(r"<FILL\s+[^>]*>")
def lowerCamelCase__ ( __lowerCamelCase : List[Any] ):
__UpperCAmelCase : Optional[Any] = code.split("""\n""" )
__UpperCAmelCase : str = 0
while idx < len(__lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCamelCase ):
return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def lowerCamelCase__ ( __lowerCamelCase : List[str] ):
__UpperCAmelCase : Tuple = len(get_indent(__lowerCamelCase ) ) > 0
if has_indent:
__UpperCAmelCase : Optional[Any] = f"""class Bla:\n{code}"""
__UpperCAmelCase : Dict = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=__lowerCamelCase )
__UpperCAmelCase : Dict = black.format_str(__lowerCamelCase , mode=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Any = style_docstrings_in_code(__lowerCamelCase )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def lowerCamelCase__ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=False ):
with open(__lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__UpperCAmelCase : Optional[Any] = f.readlines()
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : str = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCamelCase ):
__UpperCAmelCase : Dict = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = search.groups()
__UpperCAmelCase : Any = find_code_in_diffusers(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = get_indent(__lowerCamelCase )
__UpperCAmelCase : Tuple = line_index + 1 if indent == theoretical_indent else line_index + 2
__UpperCAmelCase : Any = theoretical_indent
__UpperCAmelCase : Any = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
__UpperCAmelCase : int = True
while line_index < len(__lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCamelCase ):
break
__UpperCAmelCase : List[Any] = lines[line_index]
__UpperCAmelCase : str = _should_continue(__lowerCamelCase , __lowerCamelCase ) and re.search(f"""^{indent}# End copy""" , __lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__UpperCAmelCase : Optional[int] = lines[start_index:line_index]
__UpperCAmelCase : int = """""".join(__lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
__UpperCAmelCase : Tuple = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(__lowerCamelCase ) is None]
__UpperCAmelCase : List[Any] = """\n""".join(__lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCamelCase ) > 0:
__UpperCAmelCase : List[str] = replace_pattern.replace("""with""" , """""" ).split(""",""" )
__UpperCAmelCase : Any = [_re_replace_pattern.search(__lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = pattern.groups()
__UpperCAmelCase : List[str] = re.sub(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if option.strip() == "all-casing":
__UpperCAmelCase : List[Any] = re.sub(obja.lower() , obja.lower() , __lowerCamelCase )
__UpperCAmelCase : int = re.sub(obja.upper() , obja.upper() , __lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
__UpperCAmelCase : Union[str, Any] = blackify(lines[start_index - 1] + theoretical_code )
__UpperCAmelCase : Optional[Any] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
__UpperCAmelCase : int = lines[:start_index] + [theoretical_code] + lines[line_index:]
__UpperCAmelCase : Union[str, Any] = start_index + 1
if overwrite and len(__lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__lowerCamelCase )
return diffs
def lowerCamelCase__ ( __lowerCamelCase : bool = False ):
__UpperCAmelCase : Tuple = glob.glob(os.path.join(__lowerCamelCase , """**/*.py""" ) , recursive=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = []
for filename in all_files:
__UpperCAmelCase : str = is_copy_consistent(__lowerCamelCase , __lowerCamelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(__lowerCamelCase ) > 0:
__UpperCAmelCase : Union[str, Any] = """\n""".join(__lowerCamelCase )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a : Optional[int] = parser.parse_args()
check_copies(args.fix_and_overwrite)
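# Hedged sketch (standalone, illustrative input line): what the machinery above
# keys on. The first regex captures the indent, the fully qualified source
# object and an optional replacement pattern; the second parses each
# `Old->New` rule after the literal `with` is stripped, as done in
# is_copy_consistent.
import re

_copy = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_rule = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")

_line = "    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.step with DDPM->DDIM"
_indent, _obj, _pattern = _copy.search(_line).groups()
assert _indent == "    " and _obj == "schedulers.scheduling_ddpm.DDPMScheduler.step"
_old, _new, _ = _rule.search(_pattern.replace("with", "")).groups()
assert (_old, _new) == ("DDPM", "DDIM")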
| 114 | 1 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def _lowerCAmelCase ( A__: Any ):
'''simple docstring'''
UpperCAmelCase = [False] * len(A__ )
UpperCAmelCase = [-1] * len(A__ )
def dfs(A__: List[str] , A__: int ):
UpperCAmelCase = True
UpperCAmelCase = c
for u in graph[v]:
if not visited[u]:
dfs(A__ , 1 - c )
for i in range(len(A__ ) ):
if not visited[i]:
dfs(A__ , 0 )
for i in range(len(A__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
__magic_name__ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
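# Hedged sketch (standalone): the same two-colouring done iteratively with a
# queue, which sidesteps Python's recursion limit on long paths.
from collections import deque


def check_bipartite_bfs(graph):
    color = {vert: -1 for vert in graph}
    for start in graph:
        if color[start] != -1:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if color[u] == -1:
                    color[u] = 1 - color[v]  # opposite colour to the parent
                    queue.append(u)
                elif color[u] == color[v]:
                    return False  # an edge inside one colour class
    return True


assert check_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []})
assert not check_bipartite_bfs({0: [1, 2], 1: [0, 2], 2: [0, 1]})  # odd cycle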
| 152 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 152 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = (UniPCMultistepScheduler,)
lowerCAmelCase__ = (('num_inference_steps', 2_5),)
def lowercase_ ( self : Optional[int] , **_A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**_A )
return config
def lowercase_ ( self : Optional[Any] , _A : str=0 , **_A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = dict(self.forward_default_kwargs )
UpperCAmelCase__ : str = kwargs.pop('''num_inference_steps''' , _A )
UpperCAmelCase__ : List[str] = self.dummy_sample
UpperCAmelCase__ : Dict = 0.1 * sample
UpperCAmelCase__ : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ : Any = self.get_scheduler_config(**_A )
UpperCAmelCase__ : str = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
UpperCAmelCase__ : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
UpperCAmelCase__ : Optional[Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
UpperCAmelCase__ : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase__ , UpperCAmelCase__ : Dict = sample, sample
for t in range(_A , time_step + scheduler.config.solver_order + 1 ):
UpperCAmelCase__ : int = scheduler.step(_A , _A , _A , **_A ).prev_sample
UpperCAmelCase__ : int = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase_ ( self : Any , _A : List[str]=0 , **_A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = dict(self.forward_default_kwargs )
UpperCAmelCase__ : str = kwargs.pop('''num_inference_steps''' , _A )
UpperCAmelCase__ : List[Any] = self.dummy_sample
UpperCAmelCase__ : List[Any] = 0.1 * sample
UpperCAmelCase__ : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ : List[Any] = self.get_scheduler_config()
UpperCAmelCase__ : int = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
UpperCAmelCase__ : Optional[int] = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase__ : str = scheduler.step(_A , _A , _A , **_A ).prev_sample
UpperCAmelCase__ : Dict = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase_ ( self : Union[str, Any] , _A : Union[str, Any]=None , **_A : Optional[Any] ):
'''simple docstring'''
if scheduler is None:
UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase__ : int = self.get_scheduler_config(**_A )
UpperCAmelCase__ : Any = scheduler_class(**_A )
UpperCAmelCase__ : Any = self.scheduler_classes[0]
UpperCAmelCase__ : Optional[int] = self.get_scheduler_config(**_A )
UpperCAmelCase__ : Tuple = scheduler_class(**_A )
UpperCAmelCase__ : Optional[int] = 10
UpperCAmelCase__ : str = self.dummy_model()
UpperCAmelCase__ : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase__ : List[Any] = model(_A , _A )
UpperCAmelCase__ : List[str] = scheduler.step(_A , _A , _A ).prev_sample
return sample
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase__ : Any = kwargs.pop('''num_inference_steps''' , _A )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ : Any = self.get_scheduler_config()
UpperCAmelCase__ : Any = scheduler_class(**_A )
UpperCAmelCase__ : Tuple = self.dummy_sample
UpperCAmelCase__ : Dict = 0.1 * sample
if num_inference_steps is not None and hasattr(_A , '''set_timesteps''' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A , '''set_timesteps''' ):
UpperCAmelCase__ : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
UpperCAmelCase__ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
UpperCAmelCase__ : Dict = scheduler.timesteps[5]
UpperCAmelCase__ : List[Any] = scheduler.timesteps[6]
UpperCAmelCase__ : Optional[int] = scheduler.step(_A , _A , _A , **_A ).prev_sample
UpperCAmelCase__ : str = scheduler.step(_A , _A , _A , **_A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = UniPCMultistepScheduler(**self.get_scheduler_config() )
UpperCAmelCase__ : Tuple = self.full_loop(scheduler=_A )
UpperCAmelCase__ : str = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
UpperCAmelCase__ : Tuple = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCAmelCase__ : List[Any] = DEISMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase__ : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase__ : int = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase__ : int = self.full_loop(scheduler=_A )
UpperCAmelCase__ : str = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def lowercase_ ( self : Any ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=_A )
def lowercase_ ( self : int ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , solver_order=_A , solver_type=_A , )
def lowercase_ ( self : Any ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A , solver_type=_A , prediction_type=_A , )
UpperCAmelCase__ : str = self.full_loop(
solver_order=_A , solver_type=_A , prediction_type=_A , )
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def lowercase_ ( self : Tuple ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=_A , time_step=0 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.full_loop()
UpperCAmelCase__ : str = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.full_loop(prediction_type='''v_prediction''' )
UpperCAmelCase__ : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1e-3
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase__ : Optional[int] = self.get_scheduler_config(thresholding=_A , dynamic_thresholding_ratio=0 )
UpperCAmelCase__ : int = scheduler_class(**_A )
UpperCAmelCase__ : str = 10
UpperCAmelCase__ : Dict = self.dummy_model()
UpperCAmelCase__ : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase__ : str = model(_A , _A )
UpperCAmelCase__ : str = scheduler.step(_A , _A , _A ).prev_sample
assert sample.dtype == torch.floataa
def lowercase_ ( self : Optional[int] , **_A : int ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ : Optional[int] = self.get_scheduler_config(**_A )
UpperCAmelCase__ : Any = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 181 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = '''▁'''
UpperCamelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
UpperCamelCase__ = {
'''facebook/xglm-564M''': 2_0_4_8,
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
def __init__( self : Optional[int] , _A : Optional[Any] , _A : Optional[Any]="<s>" , _A : List[str]="</s>" , _A : Optional[Any]="</s>" , _A : List[str]="<s>" , _A : Tuple="<unk>" , _A : List[str]="<pad>" , _A : Optional[Dict[str, Any]] = None , **_A : Union[str, Any] , ):
'''simple docstring'''
UpperCAmelCase__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCAmelCase__ : Optional[int] = 7
UpperCAmelCase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
UpperCAmelCase__ : Tuple = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
UpperCAmelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
UpperCAmelCase__ : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
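        # Worked example of the offset: the spm id of "," is 3 and the fairseq offset is 1,
        # so the token-to-id conversion below returns 3 + 1 = 4, matching the table above.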
UpperCAmelCase__ : Any = 1
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase__ : Any = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
UpperCAmelCase__ : int = len(self.sp_model )
UpperCAmelCase__ : Optional[int] = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_A )
UpperCAmelCase__ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.__dict__.copy()
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase_ ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCAmelCase__ : Dict = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def lowercase_ ( self : str , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A ))
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A ))
def lowercase_ ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self : Optional[Any] , _A : str ):
'''simple docstring'''
return self.sp_model.encode(_A , out_type=_A )
def lowercase_ ( self : List[str] , _A : List[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase__ : Union[str, Any] = self.sp_model.PieceToId(_A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase_ ( self : List[Any] , _A : str ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase_ ( self : int , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = ''''''.join(_A ).replace(_A , ''' ''' ).strip()
return out_string
def lowercase_ ( self : Any , _A : str , _A : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase__ : List[str] = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , '''wb''' ) as fi:
UpperCAmelCase__ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
| 181 | 1 |
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( snake_case :np.ndarray ) -> Optional[int]:
return input_array.reshape((input_array.size, 1) )
def A ( snake_case :np.ndarray , snake_case :np.ndarray , snake_case :int ) -> str:
__UpperCamelCase = np.nan
for i in range(_a ):
__UpperCamelCase = features[:, labels == i]
__UpperCamelCase = data.mean(1 )
# Centralize the data of class i
__UpperCamelCase = data - column_reshape(_a )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_a , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
__UpperCamelCase = np.dot(_a , centered_data.T )
return covariance_sum / features.shape[1]
def A ( snake_case :np.ndarray , snake_case :np.ndarray , snake_case :int ) -> Optional[int]:
__UpperCamelCase = features.mean(1 )
__UpperCamelCase = np.nan
for i in range(_a ):
__UpperCamelCase = features[:, labels == i]
__UpperCamelCase = data.shape[1]
__UpperCamelCase = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_a ) - column_reshape(_a ) , (column_reshape(_a ) - column_reshape(_a )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
__UpperCamelCase = device_data * np.dot(
column_reshape(_a ) - column_reshape(_a ) , (column_reshape(_a ) - column_reshape(_a )).T , )
return covariance_sum / features.shape[1]
def A ( snake_case :np.ndarray , snake_case :int ) -> int:
if features.any():
__UpperCamelCase = features.mean(1 )
# Center the dataset
__UpperCamelCase = features - np.reshape(_a , (data_mean.size, 1) )
__UpperCamelCase = np.dot(_a , centered_data.T ) / features.shape[1]
__UpperCamelCase , __UpperCamelCase = np.linalg.eigh(_a )
# Take all the columns in the reverse order (-1), and then takes only the first
__UpperCamelCase = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
__UpperCamelCase = np.dot(filtered_eigenvectors.T , _a )
logging.info('Principal Component Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=_a )
logging.error('Dataset empty' )
raise AssertionError
def A ( snake_case :np.ndarray , snake_case :np.ndarray , snake_case :int , snake_case :int ) -> Optional[int]:
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
__UpperCamelCase , __UpperCamelCase = eigh(
covariance_between_classes(_a , _a , _a ) , covariance_within_classes(_a , _a , _a ) , )
__UpperCamelCase = eigenvectors[:, ::-1][:, :dimensions]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = np.linalg.svd(_a )
__UpperCamelCase = svd_matrix[:, 0:dimensions]
__UpperCamelCase = np.dot(filtered_svd_matrix.T , _a )
logging.info('Linear Discriminant Analysis computed' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=_a )
logging.error('Dataset empty' )
raise AssertionError
def A ( ) -> Tuple:
__UpperCamelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
__UpperCamelCase = np.array([0, 0, 0, 1, 1] )
__UpperCamelCase = 2
__UpperCamelCase = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_a ) as error_info:
__UpperCamelCase = linear_discriminant_analysis(
_a , _a , _a , _a )
if isinstance(_a , np.ndarray ):
raise AssertionError(
'Did not raise AssertionError for dimensions > classes' )
assert error_info.type is AssertionError
def A ( ) -> Optional[Any]:
__UpperCamelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
__UpperCamelCase = 2
__UpperCamelCase = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] )
with pytest.raises(_a ) as error_info:
__UpperCamelCase = principal_component_analysis(_a , _a )
if not np.allclose(_a , _a ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
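# Hedged sketch (standalone, illustrative data): the PCA routine above with
# readable names. Centre the (features x samples) matrix, eigendecompose the
# covariance, keep the leading eigenvectors and project onto them.
import numpy as np


def pca_sketch(features, dimensions):
    centered = features - features.mean(axis=1, keepdims=True)
    covariance = centered @ centered.T / features.shape[1]
    _, eigenvectors = np.linalg.eigh(covariance)  # eigenvalues come out ascending
    leading = eigenvectors[:, ::-1][:, :dimensions]  # largest-variance directions first
    # Like the routine above, the projection is applied to the raw features,
    # not the centred ones.
    return leading.T @ features


_features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [0.0, 0.0, 0.0, 0.0]])
assert pca_sketch(_features, 1).shape == (1, 4)  # one component, four samples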
| 361 |
"""simple docstring"""
UpperCamelCase : Union[str, Any] = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def A ( snake_case :Dict , snake_case :Tuple , snake_case :str , snake_case :Optional[int] ) -> Union[str, Any]:
    # Return True if there is an augmenting path from source to sink in the residual graph.
__UpperCamelCase = [False] * len(snake_case )
__UpperCamelCase = [s]
__UpperCamelCase = True
while queue:
__UpperCamelCase = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(snake_case )
__UpperCamelCase = True
__UpperCamelCase = u
return visited[t]
def A ( snake_case :int , snake_case :Any , snake_case :Union[str, Any] ) -> Optional[int]:
__UpperCamelCase = [-1] * (len(snake_case ))
__UpperCamelCase = 0
__UpperCamelCase = []
__UpperCamelCase = [i[:] for i in graph] # Record original cut, copy.
while bfs(snake_case , snake_case , snake_case , snake_case ):
__UpperCamelCase = float('Inf' )
__UpperCamelCase = sink
while s != source:
# Find the minimum value in select path
__UpperCamelCase = min(snake_case , graph[parent[s]][s] )
__UpperCamelCase = parent[s]
max_flow += path_flow
__UpperCamelCase = sink
while v != source:
__UpperCamelCase = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__UpperCamelCase = parent[v]
for i in range(len(snake_case ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
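# Hedged sketch (standalone): the BFS-based augmenting-path loop above reduced
# to just the maximum-flow value; by the max-flow/min-cut theorem this equals
# the total capacity of the cut edges the script prints.
from collections import deque


def max_flow(capacity, source, sink):
    graph = [row[:] for row in capacity]  # residual capacities
    n, flow = len(graph), 0
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:  # BFS for a shortest augmenting path
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and graph[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left
            return flow
        bottleneck, v = float('inf'), sink
        while v != source:
            bottleneck = min(bottleneck, graph[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:  # push the bottleneck along the path
            u = parent[v]
            graph[u][v] -= bottleneck
            graph[v][u] += bottleneck
            v = u
        flow += bottleneck


_capacity = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert max_flow(_capacity, 0, 5) == 23  # the classic CLRS network, max flow 23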
| 263 | 0 |
from __future__ import annotations
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
    # 1) Construct the failure array for the pattern
    lowercase = get_failure_array(_A )
# 2) Step through text searching for pattern
lowercase , lowercase = 0, 0 # index into text, pattern
while i < len(_A ):
if pattern[j] == text[i]:
if j == (len(_A ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
lowercase = failure[j - 1]
continue
i += 1
return False
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = [0]
lowercase = 0
lowercase = 1
while j < len(_A ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
lowercase = failure[i - 1]
continue
j += 1
failure.append(_A )
return failure
if __name__ == "__main__":
# Test 1)
lowercase__ :Any = "abc1abc12"
lowercase__ :str = "alskfjaldsabc1abc1abc12k23adsfabcabc"
lowercase__ :Dict = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
lowercase__ :List[str] = "ABABX"
lowercase__ :Optional[int] = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
lowercase__ :Union[str, Any] = "AAAB"
lowercase__ :Union[str, Any] = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
lowercase__ :List[str] = "abcdabcy"
lowercase__ :int = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
lowercase__ :Dict = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
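# Worked trace (appended note) for the failure array asserted above, where
# failure[i] is the length of the longest proper prefix of the pattern that is
# also a suffix of pattern[: i + 1]:
#
#   i : 0 1 2 3 4 5 6 7 8
#   p : a a b a a b a a a
#   f : 0 1 0 1 2 3 4 5 2
#
# At i = 8 the running prefix length is 5, but p[8] = 'a' != p[5] = 'b', so the
# length falls back via failure[4] = 2, fails again (p[2] = 'b'), falls back via
# failure[1] = 1, and finally matches p[1] = 'a', giving failure[8] = 2.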
| 101 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCAmelCase_ = 192
lowerCAmelCase_ = 768
lowerCAmelCase_ = 12
lowerCAmelCase_ = 3
lowerCAmelCase_ = [800, 1333]
lowerCAmelCase_ = False
elif yolos_name == "yolos_s_dWr":
lowerCAmelCase_ = 330
lowerCAmelCase_ = 14
lowerCAmelCase_ = 6
lowerCAmelCase_ = 1320
elif "yolos_s" in yolos_name:
lowerCAmelCase_ = 384
lowerCAmelCase_ = 1536
lowerCAmelCase_ = 12
lowerCAmelCase_ = 6
elif "yolos_b" in yolos_name:
lowerCAmelCase_ = [800, 1344]
lowerCAmelCase_ = 91
lowerCAmelCase_ = '''huggingface/label-files'''
lowerCAmelCase_ = '''coco-detection-id2label.json'''
lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase_ = idalabel
lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
return config
def __UpperCamelCase ( _A , _A , _A = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ = in_proj_weight[: config.hidden_size, :]
lowerCAmelCase_ = in_proj_bias[: config.hidden_size]
lowerCAmelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase_ = in_proj_weight[-config.hidden_size :, :]
lowerCAmelCase_ = in_proj_bias[-config.hidden_size :]
def __UpperCamelCase ( _A ):
if "backbone" in name:
lowerCAmelCase_ = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
lowerCAmelCase_ = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
lowerCAmelCase_ = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
lowerCAmelCase_ = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
lowerCAmelCase_ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
lowerCAmelCase_ = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
lowerCAmelCase_ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowerCAmelCase_ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowerCAmelCase_ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCAmelCase_ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCAmelCase_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCAmelCase_ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
lowerCAmelCase_ = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
lowerCAmelCase_ = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
lowerCAmelCase_ = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def __UpperCamelCase ( _A , _A ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ = orig_state_dict.pop(_A )
if "qkv" in key:
lowerCAmelCase_ = key.split('''.''' )
lowerCAmelCase_ = int(key_split[2] )
lowerCAmelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[dim : dim * 2]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = val
return orig_state_dict
def __UpperCamelCase ( ):
lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _A , _A , _A , _A = False ):
lowerCAmelCase_ = get_yolos_config(_A )
# load original state_dict
lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )['''model''']
# load 🤗 model
lowerCAmelCase_ = YolosForObjectDetection(_A )
model.eval()
lowerCAmelCase_ = convert_state_dict(_A , _A )
model.load_state_dict(_A )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCAmelCase_ = 800 if yolos_name != '''yolos_ti''' else 512
lowerCAmelCase_ = YolosImageProcessor(format='''coco_detection''' , size=_A )
lowerCAmelCase_ = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCAmelCase_ = model(**_A )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.logits, outputs.pred_boxes
lowerCAmelCase_ , lowerCAmelCase_ = None, None
if yolos_name == "yolos_ti":
lowerCAmelCase_ = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
lowerCAmelCase_ = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
lowerCAmelCase_ = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
lowerCAmelCase_ = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
lowerCAmelCase_ = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
lowerCAmelCase_ = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
lowerCAmelCase_ = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
lowerCAmelCase_ = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
lowerCAmelCase_ = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
        expected_slice_boxes = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(f"Unknown yolos_name: {yolos_name}" )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''' )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization='''hustvl''' )
        model.push_to_hub(model_name , organization='''hustvl''' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
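# --- Hedged illustration (toy shapes, not tied to any checkpoint; the helper
# name _demo_qkv_split is hypothetical): convert_state_dict's "qkv" branch
# slices a fused (3*dim, dim) projection into equal query/key/value thirds
# along dim 0, exactly like the slices above. Only torch is assumed.
def _demo_qkv_split():
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    # concatenating the thirds back reproduces the fused matrix
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)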
| 278 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
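# --- Hedged illustration (independent of transformers internals; the names
# _DEMO_STRUCTURE and _demo_getattr are hypothetical): the deferred loading
# that _LazyModule provides can be approximated with PEP 562's module-level
# __getattr__, resolving names to real imports only on first access.
import importlib

_DEMO_STRUCTURE = {'json': ['dumps'], 'math': ['sqrt']}

def _demo_getattr(name):
    for module_name, symbols in _DEMO_STRUCTURE.items():
        if name in symbols:
            # import the backing module lazily and hand back the symbol
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)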
| 302 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __A ( DiffusionPipeline ):
'''simple docstring'''
    unet : UNetaDModel
    scheduler : ScoreSdeVeScheduler
    def __init__( self , unet : UNetaDModel , scheduler : ScoreSdeVeScheduler ) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self ,batch_size : int = 1 ,num_inference_steps : int = 2_000 ,generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,output_type : Optional[str] = "pil" ,return_dict : bool = True ,**kwargs ,) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape ,generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] ,device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample ,sigma_t ).sample
                sample = self.scheduler.step_correct(model_output ,sample ,generator=generator ).prev_sample
            # prediction step
            model_output = model(sample ,sigma_t ).sample
            output = self.scheduler.step_pred(model_output ,t ,sample ,generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 ,1 )
        sample = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
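# --- Hedged usage sketch (not part of the pipeline class; downloads weights
# over the network, so it is illustrative rather than a test):
# "google/ncsnpp-church-256" is one public score-SDE-VE checkpoint id.
def _demo_sde_ve_sampling():
    from diffusers import ScoreSdeVePipeline

    pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
    # a handful of steps keeps the demo fast; quality needs the default ~2000
    image = pipe(num_inference_steps=10, generator=torch.manual_seed(0)).images[0]
    image.save("sde_ve_sample.png")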
| 302 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class lowerCAmelCase_ ( PretrainedConfig ):
__lowerCamelCase : int = "detr"
__lowerCamelCase : Any = ["past_key_values"]
__lowerCamelCase : Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=3 , _lowerCAmelCase=100 , _lowerCAmelCase=6 , _lowerCAmelCase=2048 , _lowerCAmelCase=8 , _lowerCAmelCase=6 , _lowerCAmelCase=2048 , _lowerCAmelCase=8 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase="relu" , _lowerCAmelCase=256 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1.0 , _lowerCAmelCase=False , _lowerCAmelCase="sine" , _lowerCAmelCase="resnet50" , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=1 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=1 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=0.1 , **_lowerCAmelCase , ) -> Dict:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_lowerCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = backbone_config.get("model_type" )
_lowerCAmelCase = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase = config_class.from_dict(_lowerCAmelCase )
# set timm attributes to None
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None, None, None
_lowerCAmelCase = use_timm_backbone
_lowerCAmelCase = backbone_config
_lowerCAmelCase = num_channels
_lowerCAmelCase = num_queries
_lowerCAmelCase = d_model
_lowerCAmelCase = encoder_ffn_dim
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = encoder_attention_heads
_lowerCAmelCase = decoder_ffn_dim
_lowerCAmelCase = decoder_layers
_lowerCAmelCase = decoder_attention_heads
_lowerCAmelCase = dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = activation_function
_lowerCAmelCase = init_std
_lowerCAmelCase = init_xavier_std
_lowerCAmelCase = encoder_layerdrop
_lowerCAmelCase = decoder_layerdrop
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = auxiliary_loss
_lowerCAmelCase = position_embedding_type
_lowerCAmelCase = backbone
_lowerCAmelCase = use_pretrained_backbone
_lowerCAmelCase = dilation
# Hungarian matcher
_lowerCAmelCase = class_cost
_lowerCAmelCase = bbox_cost
_lowerCAmelCase = giou_cost
# Loss coefficients
_lowerCAmelCase = mask_loss_coefficient
_lowerCAmelCase = dice_loss_coefficient
_lowerCAmelCase = bbox_loss_coefficient
_lowerCAmelCase = giou_loss_coefficient
_lowerCAmelCase = eos_coefficient
super().__init__(is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase )
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        return self.d_model
@classmethod
def _snake_case ( cls , _lowerCAmelCase , **_lowerCAmelCase ) -> str:
return cls(backbone_config=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self ) -> Dict[str, any]:
_lowerCAmelCase = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_lowerCAmelCase = self.backbone_config.to_dict()
_lowerCAmelCase = self.__class__.model_type
return output
class lowerCAmelCase_ ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _snake_case ( self ) -> float:
return 1E-5
@property
def _snake_case ( self ) -> int:
return 12
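# --- Hedged usage sketch (relies on the public DetrConfig exported by
# transformers, which this file defines; _demo_detr_config is hypothetical):
# the attribute_map makes hidden_size/num_attention_heads read-through
# aliases for d_model/encoder_attention_heads.
def _demo_detr_config():
    from transformers import DetrConfig

    config = DetrConfig(d_model=128, num_queries=50)  # toy sizes for illustration
    assert config.hidden_size == 128           # alias of d_model
    assert config.num_attention_heads == 8     # alias of encoder_attention_heads (default)
    assert config.to_dict()["model_type"] == "detr"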
| 158 |
'''simple docstring'''
def solution(SCREAMING_SNAKE_CASE_ : int = 1000 ):
'''simple docstring'''
return sum(e for e in range(3 , SCREAMING_SNAKE_CASE_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(f'''{solution() = }''')
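# --- Hedged cross-check (illustrative, not from the source; _closed_form is a
# hypothetical helper): the same sum has a closed form by inclusion-exclusion
# over the arithmetic series for multiples of 3, 5 and 15.
def _closed_form(limit: int = 1000) -> int:
    def series(k: int) -> int:
        m = (limit - 1) // k          # count of multiples of k below limit
        return k * m * (m + 1) // 2   # k * (1 + 2 + ... + m)
    return series(3) + series(5) - series(15)

assert _closed_form(1000) == solution(1000)  # both give 233168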
| 158 | 1 |
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19 # units = C
def carrier_concentration( conductivity: float , electron_conc: float , mobility: float , ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
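# --- Hedged usage sketch (illustrative values, not from the source): passing
# exactly one zero selects which quantity to solve for via sigma = q * n * mu.
name, value = carrier_concentration(conductivity=25.0, electron_conc=0, mobility=1200.0)
assert name == "electron_conc" and round(value / 1e17, 1) == 1.3  # ~1.3e17 carriers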
| 343 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase ( unittest.TestCase ):
@property
def a__ ( self ) -> Dict:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a__ ( self ) -> List[Any]:
_A : int = ort.SessionOptions()
_A : Any = False
return options
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_A : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_A : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
_A : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
_A : Optional[Any] = """A red cat sitting on a park bench"""
_A : Optional[Any] = np.random.RandomState(0 )
_A : Dict = pipe(
prompt=_a , image=_a , mask_image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_a , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 343 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def create_rename_keys( config , vqa_model=False , nlvr_model=False , irtr_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'transformer.blocks.{i}.norm1.weight', F'vilt.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'transformer.blocks.{i}.norm1.bias', F'vilt.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'transformer.blocks.{i}.attn.proj.weight', F'vilt.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'transformer.blocks.{i}.attn.proj.bias', F'vilt.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'transformer.blocks.{i}.norm2.weight', F'vilt.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'transformer.blocks.{i}.norm2.bias', F'vilt.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'transformer.blocks.{i}.mlp.fc1.weight', F'vilt.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc1.bias', F'vilt.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.weight', F'vilt.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.bias', F'vilt.encoder.layer.{i}.output.dense.bias') )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v( state_dict , config ):
    for i in range(config.num_hidden_layers ):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f'encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f'encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f'encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f'encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f'encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f'encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
lowerCamelCase_ = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=lowerCamelCase__ )
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
if "vqa" in checkpoint_url:
lowerCamelCase_ = True
lowerCamelCase_ = 3_1_2_9
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "vqa2-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowerCamelCase_ = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = ViltForQuestionAnswering(lowerCamelCase__ )
elif "nlvr" in checkpoint_url:
lowerCamelCase_ = True
lowerCamelCase_ = 2
lowerCamelCase_ = {0: "False", 1: "True"}
lowerCamelCase_ = {v: k for k, v in config.idalabel.items()}
lowerCamelCase_ = 3
lowerCamelCase_ = ViltForImagesAndTextClassification(lowerCamelCase__ )
elif "irtr" in checkpoint_url:
lowerCamelCase_ = True
lowerCamelCase_ = ViltForImageAndTextRetrieval(lowerCamelCase__ )
elif "mlm_itm" in checkpoint_url:
lowerCamelCase_ = True
lowerCamelCase_ = ViltForMaskedLM(lowerCamelCase__ )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["state_dict"]
lowerCamelCase_ = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
if mlm_model or irtr_model:
lowerCamelCase_ = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase__ , lowerCamelCase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCamelCase_ , lowerCamelCase_ = model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(lowerCamelCase__ )
# Define processor
lowerCamelCase_ = ViltImageProcessor(size=3_8_4 )
lowerCamelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
lowerCamelCase_ = ViltProcessor(lowerCamelCase__ , lowerCamelCase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCamelCase_ = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=lowerCamelCase__ ).raw )
lowerCamelCase_ = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=lowerCamelCase__ ).raw )
lowerCamelCase_ = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
lowerCamelCase_ = processor(lowerCamelCase__ , lowerCamelCase__ , return_tensors="pt" )
lowerCamelCase_ = processor(lowerCamelCase__ , lowerCamelCase__ , return_tensors="pt" )
lowerCamelCase_ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCamelCase_ = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=lowerCamelCase__ ).raw )
if mlm_model:
lowerCamelCase_ = "a bunch of [MASK] laying on a [MASK]."
else:
lowerCamelCase_ = "How many cats are there?"
lowerCamelCase_ = processor(lowerCamelCase__ , lowerCamelCase__ , return_tensors="pt" )
lowerCamelCase_ = model(**lowerCamelCase__ )
# Verify outputs
if mlm_model:
lowerCamelCase_ = torch.Size([1, 1_1, 3_0_5_2_2] )
lowerCamelCase_ = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , lowerCamelCase__ , atol=1e-4 )
# verify masked token prediction equals "cats"
lowerCamelCase_ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCamelCase_ = torch.Size([1, 3_1_2_9] )
lowerCamelCase_ = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
# verify vqa prediction equals "2"
lowerCamelCase_ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowerCamelCase_ = torch.Size([1, 2] )
lowerCamelCase_ = torch.tensor([-2.87_21, 2.12_91] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A =parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
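# --- Hedged illustration (toy tensors, no checkpoint needed; token id 10017 is
# arbitrary, not the real id of "cats"): the "[MASK]" verification above boils
# down to an argmax over the vocabulary axis at the masked position.
def _demo_masked_argmax():
    toy_logits = torch.zeros(1, 11, 3_0_5_2_2)
    toy_logits[0, 4, 10017] = 5.0  # pretend id 10017 scores highest at position 4
    assert toy_logits[0, 4, :].argmax(-1).item() == 10017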
| 19 |
def _a ( a :int ) -> bool:
a = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
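# --- Hedged alternative (illustrative, not from the source): for very large
# integers an exact integer binary search avoids floating-point error entirely.
def perfect_cube_exact(n: int) -> bool:
    lo, hi = 0, max(1, abs(n))
    while lo <= hi:                    # binary search for an integer cube root
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == abs(n):
            return True
        if cube < abs(n):
            lo = mid + 1
        else:
            hi = mid - 1
    return False

assert perfect_cube_exact(10 ** 45)    # True: (10 ** 15) ** 3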
| 0 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 178 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __A:
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ (*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
pass
def hashimage ( image : Image ):
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable ( mask : Image ) -> Dict:
    '''simple docstring'''
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __A( unittest.TestCase ):
"""simple docstring"""
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = MaskGenerationPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def UpperCAmelCase_ (self ):
pass
@slow
@require_torch
def UpperCAmelCase_ (self ):
UpperCamelCase__ = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
UpperCamelCase__ = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=2_56 )
# Shortening by hashing
UpperCamelCase__ = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(SCREAMING_SNAKE_CASE_ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_80, 6_40)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_80, 6_40)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_80, 6_40)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_80, 6_40)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_80, 6_40)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_80, 6_40)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_80, 6_40)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_80, 6_40)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_80, 6_40)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_80, 6_40)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_80, 6_40)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_80, 6_40)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_80, 6_40)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_80, 6_40)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_80, 6_40)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_80, 6_40)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_80, 6_40)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_80, 6_40)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_80, 6_40)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_80, 6_40)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_80, 6_40)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_80, 6_40)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_80, 6_40)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def UpperCAmelCase_ (self ):
UpperCamelCase__ = """facebook/sam-vit-huge"""
UpperCamelCase__ = pipeline("""mask-generation""" , model=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=2_56 )
# Shortening by hashing
UpperCamelCase__ = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(SCREAMING_SNAKE_CASE_ ), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
] , )
| 178 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Tuple = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class snake_case_( PretrainedConfig ):
    model_type = '''markuplm'''
def __init__( self : Tuple , UpperCamelCase_ : Optional[int]=3_0_5_2_2 , UpperCamelCase_ : Any=7_6_8 , UpperCamelCase_ : str=1_2 , UpperCamelCase_ : Dict=1_2 , UpperCamelCase_ : str=3_0_7_2 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Optional[int]=5_1_2 , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : int=1E-12 , UpperCamelCase_ : Union[str, Any]=0 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Optional[int]=2_5_6 , UpperCamelCase_ : Tuple=1_0_2_4 , UpperCamelCase_ : Any=2_1_6 , UpperCamelCase_ : int=1_0_0_1 , UpperCamelCase_ : Union[str, Any]=3_2 , UpperCamelCase_ : Dict=5_0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[Any]=None , **UpperCamelCase_ : str , ):
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : List[Any] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : Optional[Any] = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : str = hidden_act
lowerCAmelCase : Dict = intermediate_size
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : str = type_vocab_size
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : str = position_embedding_type
lowerCAmelCase : str = use_cache
lowerCAmelCase : Tuple = classifier_dropout
# additional properties
lowerCAmelCase : List[str] = max_depth
lowerCAmelCase : Optional[int] = max_xpath_tag_unit_embeddings
lowerCAmelCase : str = max_xpath_subs_unit_embeddings
lowerCAmelCase : List[str] = tag_pad_id
lowerCAmelCase : int = subs_pad_id
lowerCAmelCase : int = xpath_unit_hidden_size
 | 60 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class UpperCAmelCase (ChunkPipeline ):
"""simple docstring"""
def __init__( self , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
requires_backends(self , '''vision''' )
requires_backends(self , '''torch''' )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(_UpperCAmelCase )
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs['''points_per_batch'''] = kwargs['''points_per_batch''']
        if "points_per_crop" in kwargs:
            preprocess_kwargs['''points_per_crop'''] = kwargs['''points_per_crop''']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs['''crops_n_layers'''] = kwargs['''crops_n_layers''']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs['''crop_overlap_ratio'''] = kwargs['''crop_overlap_ratio''']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs['''crop_n_points_downscale_factor'''] = kwargs['''crop_n_points_downscale_factor''']
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params['''pred_iou_thresh'''] = kwargs['''pred_iou_thresh''']
        if "stability_score_offset" in kwargs:
            forward_params['''stability_score_offset'''] = kwargs['''stability_score_offset''']
        if "mask_threshold" in kwargs:
            forward_params['''mask_threshold'''] = kwargs['''mask_threshold''']
        if "stability_score_thresh" in kwargs:
            forward_params['''stability_score_thresh'''] = kwargs['''stability_score_thresh''']
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs['''crops_nms_thresh'''] = kwargs['''crops_nms_thresh''']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs['''output_rle_mask'''] = kwargs['''output_rle_mask''']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs['''output_bboxes_mask'''] = kwargs['''output_bboxes_mask''']
        return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , _UpperCAmelCase , *_UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
return super().__call__(_UpperCAmelCase , *_UpperCAmelCase , num_workers=_UpperCAmelCase , batch_size=_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=64 , _UpperCAmelCase = 0 , _UpperCAmelCase = 512 / 1500 , _UpperCAmelCase = 32 , _UpperCAmelCase = 1 , ):
lowercase__: Union[str, Any] = load_image(_UpperCAmelCase )
lowercase__: Dict = self.image_processor.size['''longest_edge''']
lowercase__, lowercase__, lowercase__, lowercase__: Optional[Any] = self.image_processor.generate_crop_boxes(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = self.image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
with self.device_placement():
if self.framework == "pt":
lowercase__: Tuple = self.get_inference_context()
with inference_context():
lowercase__: Optional[Any] = self._ensure_tensor_on_device(_UpperCAmelCase , device=self.device )
lowercase__: Any = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''' ) )
lowercase__: Tuple = image_embeddings
lowercase__: Optional[Any] = grid_points.shape[1]
lowercase__: Tuple = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
'''To return all points at once, set points_per_batch to None''' )
for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Dict = grid_points[:, i : i + points_per_batch, :, :]
lowercase__: int = input_labels[:, i : i + points_per_batch]
lowercase__: Any = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0.88 , _UpperCAmelCase=0.95 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , ):
lowercase__: List[Any] = model_inputs.pop('''input_boxes''' )
lowercase__: List[Any] = model_inputs.pop('''is_last''' )
lowercase__: Any = model_inputs.pop('''original_sizes''' ).tolist()
lowercase__: Union[str, Any] = model_inputs.pop('''reshaped_input_sizes''' ).tolist()
lowercase__: List[Any] = self.model(**_UpperCAmelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
lowercase__: int = model_outputs['''pred_masks''']
lowercase__: str = self.image_processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , binarize=_UpperCAmelCase )
lowercase__: str = model_outputs['''iou_scores''']
lowercase__, lowercase__, lowercase__: Optional[int] = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.7 , ):
lowercase__: int = []
lowercase__: str = []
lowercase__: List[Any] = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores''' ) )
all_masks.extend(model_output.pop('''masks''' ) )
all_boxes.append(model_output.pop('''boxes''' ) )
lowercase__: Any = torch.cat(_UpperCAmelCase )
lowercase__: Dict = torch.cat(_UpperCAmelCase )
lowercase__, lowercase__, lowercase__, lowercase__: Any = self.image_processor.post_process_for_mask_generation(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = defaultdict(_UpperCAmelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(_UpperCAmelCase )
lowercase__: Any = {}
if output_rle_mask:
lowercase__: Optional[Any] = rle_mask
if output_bboxes_mask:
lowercase__: Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 177 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_glpn'] = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 23 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_a = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
_a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
_a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count( message ):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x ):
    return x[0]
def get_frequency_order( message ):
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True )
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True )
    sorted_freq_to_letter_str = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter_str )
def english_freq_match_score( message ):
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
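# --- Hedged usage sketch (sample strings are arbitrary): English-like text
# tends to score higher on the 0-12 match scale than letter-frequency-hostile
# text, since its most/least common letters line up with ETAOIN's ends.
if __name__ == "__main__":
    print(english_freq_match_score("The quick brown fox jumps over the lazy dog again and again"))
    print(english_freq_match_score("zxqj zxqj zxqj zxqj zxqj zxqj"))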
| 23 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ : Tuple = logging.get_logger(__name__)
class __lowerCAmelCase ( BaseImageProcessor ):
"""simple docstring"""
    model_input_names = ['pixel_values']
def __init__( self : Optional[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : int = 32 , lowerCAmelCase__ : Tuple=PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : Dict , ) -> None:
'''simple docstring'''
_UpperCamelCase = do_resize
_UpperCamelCase = do_rescale
_UpperCamelCase = size_divisor
_UpperCamelCase = resample
super().__init__(**lowerCAmelCase__ )
def snake_case__ ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[ChannelDimension] = None , **lowerCAmelCase__ : List[str] ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = get_image_size(lowerCAmelCase__ )
# Rounds the height and width down to the closest multiple of size_divisor
_UpperCamelCase = height // size_divisor * size_divisor
_UpperCamelCase = width // size_divisor * size_divisor
_UpperCamelCase = resize(lowerCAmelCase__ , (new_h, new_w) , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
return image
def snake_case__ ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float , lowerCAmelCase__ : Optional[ChannelDimension] = None , **lowerCAmelCase__ : Any ) -> np.ndarray:
'''simple docstring'''
return rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case__ ( self : int , lowerCAmelCase__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[TensorType, str]] = None , lowerCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase__ : List[Any] , ) -> BatchFeature:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = size_divisor if size_divisor is not None else self.size_divisor
_UpperCamelCase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
_UpperCamelCase = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(lowerCAmelCase__ ) for img in images]
if do_resize:
_UpperCamelCase = [self.resize(lowerCAmelCase__ , size_divisor=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(lowerCAmelCase__ , scale=1 / 255 ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
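# --- Worked example of the resize rounding above (illustrative numbers,
# standalone; _round_down_to_divisor is hypothetical): with size_divisor=32,
# 480x640 is already aligned, while 500x641 floors down to it.
def _round_down_to_divisor(height, width, size_divisor=32):
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

assert _round_down_to_divisor(480, 640) == (480, 640)
assert _round_down_to_divisor(500, 641) == (480, 640)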
| 324 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
lowercase__ : Any = logging.get_logger(__name__)
class ParallelBackendConfig:
    """simple docstring"""
    backend_name = None
@experimental
def parallel_map( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    """simple docstring"""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func )
def _map_with_multiprocessing_pool( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    """simple docstring"""
    num_proc = num_proc if num_proc <= len(iterable ) else len(iterable )
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc ):
        div = len(iterable ) // num_proc
        mod = len(iterable ) % num_proc
        start = div * index + min(index , mod )
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
    if len(iterable ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            F"""Error dividing inputs iterable among processes. """
            F"""Total number of objects {len(iterable )}, """
            F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
    logger.info(
        F"""Spawning {num_proc} processes for {len(iterable )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
    initargs , initializer = None, None
    if not disable_tqdm:
        initargs , initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc , initargs=initargs , initializer=initializer ) as pool:
        mapped = pool.map(single_map_nested_func , split_kwds )
    logger.info(F"""Finished {num_proc} processes""" )
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(F"""Unpacked {len(mapped )} objects""" )
    return mapped
def _map_with_joblib( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    """simple docstring"""
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=num_proc ):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def parallel_backend( backend_name: str ):
    """simple docstring"""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark
        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
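# --- Hedged restatement of the contiguous-split arithmetic above (standalone;
# _split_bounds is hypothetical): num_items are divided into num_proc runs
# whose lengths differ by at most one, the first `mod` runs being one longer.
def _split_bounds(num_items: int, num_proc: int):
    div, mod = divmod(num_items, num_proc)
    return [
        (div * index + min(index, mod), div * (index + 1) + min(index + 1, mod))
        for index in range(num_proc)
    ]

assert _split_bounds(10, 3) == [(0, 4), (4, 7), (7, 10)]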
| 324 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase :Tuple = logging.get_logger(__name__)
class _lowerCAmelCase ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
def __init__(self , lowercase = True , lowercase = None , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(**lowercase )
A_ : str = size if size is not None else {"""shortest_edge""": 384}
A_ : Optional[int] = get_size_dict(lowercase , default_to_square=lowercase )
A_ : Any = do_resize
A_ : int = size
# Default value set here for backwards compatibility where the value in config is None
A_ : Tuple = crop_pct if crop_pct is not None else 224 / 256
A_ : Optional[int] = resample
A_ : Any = do_rescale
A_ : Optional[Any] = rescale_factor
A_ : str = do_normalize
A_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A_ : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _a (self , lowercase , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
A_ : Tuple = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
A_ : Union[str, Any] = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A_ : Any = int(shortest_edge / crop_pct )
A_ : Optional[Any] = get_resize_output_image_size(lowercase , size=lowercase , default_to_square=lowercase )
A_ : int = resize(image=lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowercase , size=(shortest_edge, shortest_edge) , data_format=lowercase , **lowercase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowercase , size=(shortest_edge, shortest_edge) , resample=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def _a (self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
A_ : Tuple = do_resize if do_resize is not None else self.do_resize
A_ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
A_ : Optional[Any] = resample if resample is not None else self.resample
A_ : Any = do_rescale if do_rescale is not None else self.do_rescale
A_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : List[Any] = image_std if image_std is not None else self.image_std
A_ : List[Any] = size if size is not None else self.size
A_ : Any = get_size_dict(lowercase , default_to_square=lowercase )
A_ : str = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A_ : Tuple = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A_ : str = [self.resize(image=lowercase , size=lowercase , crop_pct=lowercase , resample=lowercase ) for image in images]
if do_rescale:
A_ : Any = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A_ : List[str] = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A_ : Optional[int] = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A_ : int = {"""pixel_values""": images}
        return BatchFeature(data=lowercase , tensor_type=lowercase )
 | 369 |
'''simple docstring'''
def speed_of_sound_in_a_fluid( density: float , bulk_modulus: float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
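# --- Hedged usage sketch (textbook-ish constants, not from the source): water
# at bulk_modulus ~ 2.15e9 Pa and density 1000 kg/m^3 gives about 1466 m/s.
assert round(speed_of_sound_in_a_fluid(density=1000.0, bulk_modulus=2.15e9)) == 1466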
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 135 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
snake_case_ : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
snake_case_ : List[str] = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
snake_case_ : List[Any] = {
"unc-nlp/lxmert-base-uncased": 5_12,
}
snake_case_ : List[str] = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class __a (PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
def __init__( self : Union[str, Any] , __magic_name__ : Union[str, Any]=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : Tuple=True , __magic_name__ : Optional[Any]="[UNK]" , __magic_name__ : Any="[SEP]" , __magic_name__ : Dict="[PAD]" , __magic_name__ : int="[CLS]" , __magic_name__ : Optional[Any]="[MASK]" , __magic_name__ : Optional[Any]=True , __magic_name__ : Union[str, Any]=None , **__magic_name__ : List[Any] , ) -> Dict:
"""simple docstring"""
super().__init__(
__magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , tokenize_chinese_chars=__magic_name__ , strip_accents=__magic_name__ , **__magic_name__ , )
UpperCAmelCase_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __magic_name__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __magic_name__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __magic_name__ ) != tokenize_chinese_chars
):
UpperCAmelCase_ : List[Any] = getattr(__magic_name__ , normalizer_state.pop('''type''' ) )
UpperCAmelCase_ : Optional[int] = do_lower_case
UpperCAmelCase_ : Optional[Any] = strip_accents
UpperCAmelCase_ : List[Any] = tokenize_chinese_chars
UpperCAmelCase_ : Any = normalizer_class(**__magic_name__ )
UpperCAmelCase_ : Any = do_lower_case
def UpperCAmelCase__ ( self : List[Any] , token_ids_a : Any , token_ids_b : Dict=None ) -> Dict:
    """simple docstring"""
    output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
    if token_ids_b:
        output += token_ids_b + [self.sep_token_id]
    return output
def UpperCAmelCase__ ( self : Optional[int] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
    """simple docstring"""
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_b is None:
        return len(cls + token_ids_a + sep ) * [0]
    return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
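# Worked sketch (illustrative token counts): for a pair of sequences the mask is
# [0] * len([CLS] A [SEP]) followed by [1] * len(B [SEP]), i.e. every position in
# the first segment (including CLS and its SEP) maps to 0 and the second segment to 1.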
def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
| 125 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : Optional[int] = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class __a (lowerCamelCase ):
__a : Union[str, Any] = "lilt"
def __init__( self : Any , __magic_name__ : Tuple=3_05_22 , __magic_name__ : str=7_68 , __magic_name__ : Tuple=12 , __magic_name__ : int=12 , __magic_name__ : str=30_72 , __magic_name__ : List[Any]="gelu" , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Any=5_12 , __magic_name__ : List[Any]=2 , __magic_name__ : Dict=0.0_2 , __magic_name__ : List[Any]=1E-12 , __magic_name__ : List[str]=0 , __magic_name__ : List[str]="absolute" , __magic_name__ : str=None , __magic_name__ : Dict=4 , __magic_name__ : str=10_24 , **__magic_name__ : Optional[Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=__magic_name__ , **__magic_name__ )
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Tuple = hidden_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Any = layer_norm_eps
UpperCAmelCase_ : int = position_embedding_type
UpperCAmelCase_ : Tuple = classifier_dropout
UpperCAmelCase_ : Dict = channel_shrink_ratio
UpperCAmelCase_ : int = max_ad_position_embeddings
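# Note (a sketch, not from the source): the two LiLT-specific fields above are the
# channel shrink ratio (how much narrower the layout stream is than the text stream)
# and the 2D position budget (upstream: max_2d_position_embeddings), which caps the
# bounding-box coordinate embeddings.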
| 125 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but uses more memory.
UpperCAmelCase_ : Any = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCAmelCase_ : int = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCAmelCase_ : int = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Union[str, Any] ) -> tuple[str, float]:
a_ : Union[str, Any] = len([g for position, g in enumerate(_lowerCAmelCase ) if g == main_target[position]] )
return (item, float(_lowerCAmelCase ))
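# Worked sketch (assumed target "abc"): a candidate "abc" scores 3.0 and "abd"
# scores 2.0 — fitness is simply the count of positions where the candidate's
# gene equals the target's gene at the same index.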
def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : List[str] ) -> tuple[str, str]:
a_ : Any = random.randint(0 , len(_lowerCAmelCase ) - 1 )
a_ : Dict = parent_a[:random_slice] + parent_a[random_slice:]
a_ : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
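# Intent sketch (the collapsed variable names above obscure it): this is single-point
# crossover — with parents "aaaa" and "bbbb" and a random cut at index 2, the two
# children are "aabb" and "bbaa", each taking one parent's head and the other's tail.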
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : str ) -> str:
a_ : str = list(_lowerCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
a_ : int = random.choice(_lowerCAmelCase )
return "".join(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( __A : int , __A : Union[str, Any] , __A : Tuple , ) -> list[str]:
a_ : Dict = []
# Generate more children proportionally to the fitness score.
a_ : int = int(parent_a[1] * 1_00 ) + 1
a_ : List[str] = 10 if child_n >= 10 else child_n
for _ in range(_lowerCAmelCase ):
a_ : str = population_score[random.randint(0 , _lowerCAmelCase )][0]
a_ : Any = crossover(parent_a[0] , _lowerCAmelCase )
# Append new string to the population list.
pop.append(mutate(_lowerCAmelCase , _lowerCAmelCase ) )
pop.append(mutate(_lowerCAmelCase , _lowerCAmelCase ) )
return pop
def SCREAMING_SNAKE_CASE_ ( __A : Dict , __A : str , __A : int = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
a_ : List[Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(_lowerCAmelCase )
# Verify that the target contains no genes besides the ones inside the genes variable.
a_ : Dict = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
a_ : Optional[int] = F"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(_lowerCAmelCase )
# Generate random starting population.
a_ : str = []
for _ in range(_lowerCAmelCase ):
population.append(''.join([random.choice(_lowerCAmelCase ) for i in range(len(_lowerCAmelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
a_ : Tuple = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_lowerCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
a_ : str = [evaluate(_lowerCAmelCase , _lowerCAmelCase ) for item in population]
# Check if there is a matching evolution.
a_ : str = sorted(_lowerCAmelCase , key=lambda __A : x[1] , reverse=_lowerCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"""\nGeneration: {generation}"""
F"""\nTotal Population:{total_population}"""
F"""\nBest score: {population_score[0][1]}"""
F"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
a_ : List[str] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_lowerCAmelCase )
# Normalize population score to be between 0 and 1.
a_ : Optional[int] = [
(item, score / len(_lowerCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_lowerCAmelCase ):
population.extend(select(population_score[int(_lowerCAmelCase )] , _lowerCAmelCase , _lowerCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also solve small strings in
# far fewer generations.
if len(_lowerCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
UpperCAmelCase_ : Any = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = basic(target_str, genes_list)
print(
F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 371 |
def SCREAMING_SNAKE_CASE_ ( input_num : int ) -> int:
    """simple docstring"""
    if not isinstance(input_num , int ):
        raise ValueError('Input must be an integer' )
    if input_num <= 0:
        raise ValueError('Input must be positive' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
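# Worked sketch: range(1, input_num // 2 + 1) enumerates the candidate proper
# divisors, so for input_num = 6 the sum is 1 + 2 + 3 = 6 (a perfect number) and
# for input_num = 8 it is 1 + 2 + 4 = 7.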
if __name__ == "__main__":
import doctest
doctest.testmod()
| 120 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def _a( UpperCamelCase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =[
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase_, UpperCamelCase_ )
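# Note (sketch): the keys listed above are fairseq bookkeeping — version markers and
# cached sinusoidal position buffers ("_float_tensor") — with no Hugging Face
# counterpart, so they are popped before the state dict is loaded.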
def _a( UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
SCREAMING_SNAKE_CASE__ : List[str] =s_dict.pop(UpperCamelCase_ )
elif "subsample" in key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =s_dict.pop(UpperCamelCase_ )
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =emb.weight.shape
SCREAMING_SNAKE_CASE__ : Optional[Any] =nn.Linear(UpperCamelCase_, UpperCamelCase_, bias=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : List[Any] =emb.weight.data
return lin_layer
def _a( UpperCamelCase__ : int, UpperCamelCase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.load(UpperCamelCase_, map_location='''cpu''' )
SCREAMING_SNAKE_CASE__ : Dict =mam_aaa['''args''']
SCREAMING_SNAKE_CASE__ : List[str] =mam_aaa['''model''']
SCREAMING_SNAKE_CASE__ : Tuple =state_dict['''decoder.output_projection.weight''']
remove_ignore_keys_(UpperCamelCase_ )
rename_keys(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =state_dict['''decoder.embed_tokens.weight'''].shape[0]
SCREAMING_SNAKE_CASE__ : List[str] =args.share_decoder_input_output_embed
SCREAMING_SNAKE_CASE__ : Any =[int(UpperCamelCase_ ) for i in args.conv_kernel_sizes.split(''',''' )]
SCREAMING_SNAKE_CASE__ : int =SpeechaTextConfig(
vocab_size=UpperCamelCase_, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', num_conv_layers=len(UpperCamelCase_ ), conv_channels=args.conv_channels, conv_kernel_sizes=UpperCamelCase_, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=UpperCamelCase_, num_beams=5, max_length=2_0_0, use_cache=UpperCamelCase_, decoder_start_token_id=2, early_stopping=UpperCamelCase_, )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =SpeechaTextForConditionalGeneration(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =model.model.load_state_dict(UpperCamelCase_, strict=UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0 and not set(UpperCamelCase_ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f" but all the following weights are missing {missing}" )
if tie_embeds:
SCREAMING_SNAKE_CASE__ : List[Any] =make_linear_from_emb(model.model.decoder.embed_tokens )
else:
SCREAMING_SNAKE_CASE__ : Dict =lm_head_weights
model.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
a_ = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path) | 152 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( _UpperCAmelCase , unittest.TestCase ):
A__ : Any =GPTSanJapaneseTokenizer
A__ : str =False
A__ : int ={"""do_clean_text""": False, """add_prefix_space""": False}
def A_ ( self : Any ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE__ = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE__ = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
SCREAMING_SNAKE_CASE__ = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(UpperCAmelCase_ ) )
def A_ ( self : str , **UpperCAmelCase_ : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def A_ ( self : int , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE__ = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
SCREAMING_SNAKE_CASE__ = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
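# Note (grounded in the strings above): the expected output normalises the variant
# glyph 㔺 to the standard 世 while the whitespace, newline and emoji survive the
# encode/decode round trip.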
def A_ ( self : Any , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_input_output_texts(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def A_ ( self : str ):
pass # TODO add if relevant
def A_ ( self : Tuple ):
pass # TODO add if relevant
def A_ ( self : int ):
pass # TODO add if relevant
def A_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
# Testing tokenization
SCREAMING_SNAKE_CASE__ = 'こんにちは、世界。 こんばんは、㔺界。'
SCREAMING_SNAKE_CASE__ = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE__ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
# Testing tokenization
SCREAMING_SNAKE_CASE__ = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
SCREAMING_SNAKE_CASE__ = 'こんにちは、、、、世界。こんばんは、、、、世界。'
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.decode(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
SCREAMING_SNAKE_CASE__ = 'こんにちは、世界。'
SCREAMING_SNAKE_CASE__ = 'こんばんは、㔺界。😀'
SCREAMING_SNAKE_CASE__ = 'こんにちは、世界。こんばんは、世界。😀'
SCREAMING_SNAKE_CASE__ = tokenizer.encode(prefix_text + input_text )
SCREAMING_SNAKE_CASE__ = tokenizer.encode('' , prefix_text=prefix_text + input_text )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ , prefix_text=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.decode(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.decode(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.decode(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
SCREAMING_SNAKE_CASE__ = 'こんにちは、世界。'
SCREAMING_SNAKE_CASE__ = 'こんばんは、㔺界。😀'
SCREAMING_SNAKE_CASE__ = len(tokenizer.encode(UpperCAmelCase_ ) ) - 2
SCREAMING_SNAKE_CASE__ = len(tokenizer.encode(UpperCAmelCase_ ) ) - 2
SCREAMING_SNAKE_CASE__ = [1] + [0] * (len_prefix + len_text + 1)
SCREAMING_SNAKE_CASE__ = [1] * (len_prefix + len_text + 1) + [0]
SCREAMING_SNAKE_CASE__ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
SCREAMING_SNAKE_CASE__ = tokenizer(prefix_text + input_text ).token_type_ids
SCREAMING_SNAKE_CASE__ = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
SCREAMING_SNAKE_CASE__ = tokenizer(UpperCAmelCase_ , prefix_text=UpperCAmelCase_ ).token_type_ids
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
SCREAMING_SNAKE_CASE__ = tokenizer.encode('あンいワ' )
SCREAMING_SNAKE_CASE__ = tokenizer.encode('' , prefix_text='あンいワ' )
SCREAMING_SNAKE_CASE__ = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(UpperCAmelCase_ ) , tokenizer.decode(UpperCAmelCase_ ) )
self.assertEqual(tokenizer.decode(UpperCAmelCase_ ) , tokenizer.decode(UpperCAmelCase_ ) )
self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
SCREAMING_SNAKE_CASE__ = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
SCREAMING_SNAKE_CASE__ = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_encode_plus(UpperCAmelCase_ , padding=UpperCAmelCase_ )
# fmt: off
SCREAMING_SNAKE_CASE__ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
SCREAMING_SNAKE_CASE__ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
SCREAMING_SNAKE_CASE__ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , UpperCAmelCase_ )
self.assertListEqual(x_token.token_type_ids , UpperCAmelCase_ )
self.assertListEqual(x_token.attention_mask , UpperCAmelCase_ )
self.assertListEqual(x_token_a.input_ids , UpperCAmelCase_ )
self.assertListEqual(x_token_a.token_type_ids , UpperCAmelCase_ )
self.assertListEqual(x_token_a.attention_mask , UpperCAmelCase_ )
def A_ ( self : Tuple ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def A_ ( self : List[str] ):
# tokenizer has no padding token
pass
| 176 | 0 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def lowercase ( a__ : str = "" ) -> dict[str, float]:
_UpperCamelCase = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
_UpperCamelCase = BeautifulSoup(requests.get(a__ ).text , '''html.parser''' )
_UpperCamelCase = soup.find_all('''td''' , attrs='''titleColumn''' )
_UpperCamelCase = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(a__ , a__ )
}
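# Usage sketch (network access and current chart contents assumed): the scraper
# returns a title -> rating mapping such as {"The Shawshank Redemption": 9.2, ...},
# which write_movies() below dumps to CSV under a "Movie title" / "IMDb rating" header.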
def lowercase ( a__ : str = "IMDb_Top_250_Movies.csv" ) -> None:
_UpperCamelCase = get_imdb_top_aaa_movies()
with open(a__ , '''w''' , newline='''''' ) as out_file:
_UpperCamelCase = csv.writer(a__ )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 361 | """simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 54 | 0 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 300 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
A_ : Tuple = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Dict:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A_ : str = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" )
A_ : List[Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
A_ : Optional[Any] = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A_ : Optional[Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
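# Sketch of the split above: fairseq stores the attention projections stacked as one
# (3 * hidden, hidden) qkv matrix; slicing rows [0:h], [h:2h] and [2h:3h] recovers
# the separate query, key and value weights that the HF ViT attention layers expect.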
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ) -> Any:
A_ : Dict = dct.pop(_lowerCAmelCase )
A_ : List[Any] = val
def __snake_case ( _lowerCAmelCase : List[str] ) -> int:
if "handwritten" in checkpoint_url:
A_ : Any = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Any = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
A_ : List[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert("RGB" )
return im
@torch.no_grad()
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> List[Any]:
A_ : Optional[Any] = ViTConfig(image_size=384 , qkv_bias=_lowerCAmelCase )
A_ : Tuple = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ : Tuple = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ : Optional[Any] = 1024
A_ : Union[str, Any] = 4096
A_ : Union[str, Any] = 24
A_ : List[Any] = 16
A_ : List[str] = 1024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ : Dict = False
A_ : int = "relu"
A_ : Optional[int] = 1024
A_ : Any = True
A_ : List[Any] = False
A_ : Optional[int] = False
# load HuggingFace model
A_ : Union[str, Any] = ViTModel(_lowerCAmelCase , add_pooling_layer=_lowerCAmelCase )
A_ : str = TrOCRForCausalLM(_lowerCAmelCase )
A_ : List[str] = VisionEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
A_ : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" , check_hash=_lowerCAmelCase )["model"]
A_ : Dict = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A_ : Dict = state_dict.pop(_lowerCAmelCase )
if key.startswith("decoder" ) and "output_projection" not in key:
A_ : List[str] = val
else:
A_ : Optional[Any] = val
# load state dict
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
A_ : List[Any] = ViTImageProcessor(size=encoder_config.image_size )
A_ : Any = RobertaTokenizer.from_pretrained("roberta-large" )
A_ : Union[str, Any] = TrOCRProcessor(_lowerCAmelCase , _lowerCAmelCase )
A_ : List[str] = processor(images=prepare_img(_lowerCAmelCase ) , return_tensors="pt" ).pixel_values
# verify logits
A_ : Union[str, Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A_ : Optional[int] = model(pixel_values=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
A_ : Tuple = outputs.logits
A_ : Union[str, Any] = torch.Size([1, 1, 50265] )
if "trocr-base-handwritten" in checkpoint_url:
A_ : Union[str, Any] = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : str = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowerCAmelCase : List[str] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 300 | 1 |
import math
class a_ :
def __init__( self :Dict , _lowercase :Union[str, Any]=0) -> Union[str, Any]: # a graph with Node 0,1,...,N-1
UpperCAmelCase_ = n
UpperCAmelCase_ = [
[math.inf for j in range(0 , _lowercase)] for i in range(0 , _lowercase)
] # adjacency matrix for weight
UpperCAmelCase_ = [
[math.inf for j in range(0 , _lowercase)] for i in range(0 , _lowercase)
] # dp[i][j] stores minimum distance from i to j
def __a ( self :Any , _lowercase :List[str] , _lowercase :List[str] , _lowercase :Optional[int]) -> Any:
UpperCAmelCase_ = w
def __a ( self :str) -> Optional[Any]:
for k in range(0 , self.n):
for i in range(0 , self.n):
for j in range(0 , self.n):
UpperCAmelCase_ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j])
def __a ( self :int , _lowercase :Tuple , _lowercase :Optional[int]) -> List[str]:
return self.dp[u][v]
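# Complexity sketch: the triple loop in floyd_warshall() is the classic O(n^3)
# relaxation over intermediate vertices k; once it has run, show_min(u, v) is an
# O(1) lookup of the all-pairs shortest distance dp[u][v].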
if __name__ == "__main__":
UpperCamelCase_ = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 344 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class a_ ( nn.Module ):
def __init__( self :Optional[Any]) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ = nn.Linear(3 , 4)
UpperCAmelCase_ = nn.BatchNormad(4)
UpperCAmelCase_ = nn.Linear(4 , 5)
def __a ( self :Dict , _lowercase :int) -> str:
return self.lineara(self.batchnorm(self.lineara(_lowercase)))
class a_ ( _snake_case ):
def __a ( self :Tuple , _lowercase :Optional[int] , *_lowercase :Union[str, Any] , **_lowercase :Any) -> Optional[Any]:
return (args[0] + 1,) + args[1:], kwargs
class a_ ( _snake_case ):
def __a ( self :Union[str, Any] , _lowercase :Dict , _lowercase :Tuple) -> int:
return output + 1
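# Mechanics sketch: PreForwardHook bumps the first positional input by +1 before
# forward and PostForwardHook bumps the output by +1 afterwards; the tests below use
# these offsets to verify that re-attaching a hook replaces it rather than chaining,
# unless a SequentialHook wraps several hooks explicitly.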
class a_ ( unittest.TestCase ):
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
self.assertEqual(test_model._hf_hook , _lowercase)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
add_hook_to_module(_lowercase , _lowercase , append=_lowercase)
self.assertEqual(isinstance(test_model._hf_hook , _lowercase) , _lowercase)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
def __a ( self :List[str]) -> int:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , output + 2 , atol=1E-5)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1))
self.assertTrue(outputa.requires_grad)
UpperCAmelCase_ = True
UpperCAmelCase_ = test_model(_lowercase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(0))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
UpperCAmelCase_ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
UpperCAmelCase_ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase))
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
| 344 | 1 |
import math
import qiskit
def lowercase ( SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 1 ) -> qiskit.result.counts.Counts:
if (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
or isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
or isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
):
raise TypeError("""inputs must be integers.""" )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("""inputs must be positive.""" )
if (
(math.floor(SCREAMING_SNAKE_CASE__ ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE__ ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE__ ) != carry_in)
):
raise ValueError("""inputs must be exact integers.""" )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("""inputs must be less or equal to 2.""" )
# build registers
_snake_case : Dict = qiskit.QuantumRegister(4 , """qr""" )
_snake_case : Optional[int] = qiskit.ClassicalRegister(2 , """cr""" )
# list the entries
_snake_case : Union[str, Any] = [input_a, input_a, carry_in]
_snake_case : int = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(SCREAMING_SNAKE_CASE__ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(SCREAMING_SNAKE_CASE__ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(SCREAMING_SNAKE_CASE__ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , SCREAMING_SNAKE_CASE__ ) # measure the last two qbits
_snake_case : List[str] = qiskit.Aer.get_backend("""aer_simulator""" )
_snake_case : Dict = qiskit.execute(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , shots=1_000 )
return job.result().get_counts(SCREAMING_SNAKE_CASE__ )
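# Worked sketch: for inputs (1, 1, 1) the classical sum is 1 + 1 + 1 = 3 = 0b11, so
# the measured counts should concentrate on '11' (sum bit 1, carry-out 1) across the
# 1_000 shots; an input of 2 puts that qubit into superposition via the Hadamard gate.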
if __name__ == "__main__":
print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 317 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
a__ = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
a__ = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = """ Hello world! cécé herlolip"""
a__ = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def lowercase ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
_snake_case : Union[str, Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowercase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
_snake_case : Optional[int] = dct.pop(SCREAMING_SNAKE_CASE__ )
_snake_case : int = val
def lowercase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]:
_snake_case : List[Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )
_snake_case : int = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
_snake_case , _snake_case : List[str] = emb.weight.shape
_snake_case : Any = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = emb.weight.data
return lin_layer
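# Intent sketch (names collapsed above): build a bias-free nn.Linear of shape
# (vocab_size, emb_size) and copy the embedding weights into it — the standard
# weight-tying trick that reuses the input embeddings as the LM head.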
@torch.no_grad()
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str=None ) -> List[str]:
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
_snake_case : List[str] = torch.hub.load("""pytorch/fairseq""" , SCREAMING_SNAKE_CASE__ ).eval()
else:
_snake_case : Union[str, Any] = load_xsum_checkpoint(SCREAMING_SNAKE_CASE__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
_snake_case : Optional[Any] = checkpoint_path.replace(""".""" , """-""" )
_snake_case : Optional[Any] = BartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
_snake_case : List[Any] = bart.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
_snake_case : str = BartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ).encode(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).all():
raise ValueError(
F'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )
if checkpoint_path == "bart.large.mnli":
_snake_case : Dict = bart.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
_snake_case : str = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = BartForSequenceClassification(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = bart.predict("""mnli""" , SCREAMING_SNAKE_CASE__ , return_logits=SCREAMING_SNAKE_CASE__ )
_snake_case : Optional[int] = model(SCREAMING_SNAKE_CASE__ )[0] # logits
else: # no classification heads to worry about
_snake_case : Dict = bart.model.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = state_dict["""decoder.embed_tokens.weight"""]
_snake_case : Optional[Any] = bart.extract_features(SCREAMING_SNAKE_CASE__ )
if hf_checkpoint_name == "facebook/bart-large":
_snake_case : Optional[Any] = BartModel(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
_snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ).model[0]
else:
_snake_case : str = BartForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(SCREAMING_SNAKE_CASE__ )
if hasattr(SCREAMING_SNAKE_CASE__ , """lm_head""" ):
_snake_case : Any = make_linear_from_emb(model.model.shared )
_snake_case : Optional[Any] = model.model(SCREAMING_SNAKE_CASE__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
a__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 317 | 1 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple=1_3 , UpperCAmelCase_ : List[str]=3_2 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : List[str]=1_6 , UpperCAmelCase_ : int=[3_2, 6_4, 1_2_8] , UpperCAmelCase_ : Optional[int]=[1, 2, 1] , UpperCAmelCase_ : int=[2, 2, 4] , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Dict=2.0 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[str]=1e-5 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : str=8 , UpperCAmelCase_ : Union[str, Any]=["stage1", "stage2"] , UpperCAmelCase_ : List[Any]=[1, 2] , ):
"""simple docstring"""
a : Tuple = parent
a : Optional[int] = batch_size
a : Optional[Any] = image_size
a : Tuple = patch_size
a : List[str] = num_channels
a : str = embed_dim
a : Any = hidden_sizes
a : Dict = depths
a : str = num_heads
a : List[Any] = window_size
a : Optional[Any] = mlp_ratio
a : List[str] = qkv_bias
a : str = hidden_dropout_prob
a : int = attention_probs_dropout_prob
a : Tuple = drop_path_rate
a : Optional[Any] = hidden_act
a : Dict = use_absolute_embeddings
a : List[Any] = patch_norm
a : Optional[Any] = layer_norm_eps
a : str = initializer_range
a : str = is_training
a : Optional[int] = scope
a : Tuple = use_labels
a : Optional[Any] = type_sequence_label_size
a : Union[str, Any] = encoder_stride
a : Tuple = out_features
a : int = out_indices
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : Optional[Any] = None
if self.use_labels:
a : int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[Any] = self.get_config()
return config, pixel_values, labels
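# Note (sketch): the triple returned above — a FocalNetConfig, a random float image
# batch, and optional integer class labels — is what every create_and_check_* helper
# below consumes.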
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : Tuple = FocalNetModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
a : str = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : Tuple = FocalNetBackbone(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1])
# verify backbone works with out_features=None
a : Optional[int] = None
a : int = FocalNetBackbone(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : str = FocalNetForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Dict = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : Optional[Any] = 1
a : int = FocalNetForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : str = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : List[str] = self.type_sequence_label_size
a : List[str] = FocalNetForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : Tuple = 1
a : List[Any] = FocalNetForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Any = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
config, pixel_values, labels = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : Optional[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
A : List[Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
A : Optional[int] = False
A : str = False
A : List[str] = False
A : Any = False
A : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
self.model_tester = FocalNetModelTester(self)
self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@unittest.skip(reason='FocalNet does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking')
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
a : int = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a , a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
a : List[str] = model_class(UpperCAmelCase_)
a : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Any = [*signature.parameters.keys()]
a : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : Any = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
a : Tuple = outputs.hidden_states
a : int = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1)
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# FocalNet has a different seq_length
a : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
a : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
a : int = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
reshaped_hidden_states = (
    reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a , a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
a : List[str] = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Union[str, Any] = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Union[str, Any] = 3
a : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
a : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
a : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
a : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
a : str = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Dict = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width))
@slow
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Any = FocalNetModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a , a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Dict = _config_zero_init(UpperCAmelCase_)
for model_class in self.all_model_classes:
a : Dict = model_class(config=UpperCAmelCase_)
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[int] = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny').to(UpperCAmelCase_)
a : int = self.default_image_processor
a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
a : Optional[Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : Any = model(**UpperCAmelCase_)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = (FocalNetBackbone,) if is_torch_available() else ()
A : Optional[Any] = FocalNetConfig
A : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
self.model_tester = FocalNetModelTester(self)
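# The tester/mixin split used throughout this file is a common pattern: a plain
# helper class builds (config, inputs) tuples and runs shape checks, while the
# unittest class owns the assertions. A minimal, self-contained sketch of that
# pattern with a toy module (all names here are illustrative):
import unittest
import torch
from torch import nn

class ToyModelTester:
    def __init__(self, parent, batch_size=2, hidden=4):
        self.parent = parent
        self.batch_size = batch_size
        self.hidden = hidden

    def prepare_inputs(self):
        return torch.rand(self.batch_size, self.hidden)

    def create_and_check_model(self, inputs):
        model = nn.Linear(self.hidden, self.hidden)
        model.eval()
        with torch.no_grad():
            out = model(inputs)
        self.parent.assertEqual(out.shape, (self.batch_size, self.hidden))

class ToyModelTest(unittest.TestCase):
    def test_model(self):
        tester = ToyModelTester(self)
        tester.create_and_check_model(tester.prepare_inputs())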
| 353 | '''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: denoises mel-spectrogram images and converts them to audio."""
    _optional_components = ["vqvae"]
    def __init__(self, vqvae: AutoencoderKL, unet: UNetaDConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler]):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)
    def get_default_steps(self) -> int:
        """Default number of inference steps: 50 for DDIM, 1000 for DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(self, batch_size: int = 1, audio_file: str = None, raw_audio: np.ndarray = None, slice: int = 0, start_step: int = 0, steps: int = None, generator: torch.Generator = None, mask_start_secs: float = 0, mask_end_secs: float = 0, step_generator: torch.Generator = None, eta: float = 0, noise: torch.Tensor = None, encoding: torch.Tensor = None, return_dict=True):
        """Run the diffusion loop, optionally conditioned on input audio, and return images plus audio."""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ), generator=generator, device=self.device, )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNetaDConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]
            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator, )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator, )["prev_sample"]
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]
        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images))
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse (deterministic DDIM) step process: recover the latent noise that generates `images`."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
@staticmethod
    def slerp(xa: torch.Tensor, xb: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation: blend xa and xb by fraction alpha along the great circle."""
        theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
        return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)
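# A quick check of the slerp helper above: interpolating along the great
# circle keeps blended noise closer to the Gaussian shell than linear
# interpolation would, which is why diffusion latents are mixed this way.
# Standalone sketch; shapes are arbitrary.
if __name__ == "__main__":
    import torch
    xa = torch.randn(1, 1, 8, 8)
    xb = torch.randn(1, 1, 8, 8)
    midpoint = AudioDiffusionPipeline.slerp(xa, xb, 0.5)
    assert midpoint.shape == xa.shape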
| 345 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
input_ids = tokenizer("Hello there", return_tensors="np").input_ids
labels = tokenizer("Hi I am", return_tensors="np").input_ids
decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 326 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_70)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : List[str]=False ) -> str:
UpperCAmelCase_ = model_type
if use_small:
key += "_small"
return os.path.join(__UpperCamelCase , REMOTE_MODEL_PATHS[key]['''file_name'''] )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : int ) -> Union[str, Any]:
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
hf_hub_download(repo_id=__UpperCamelCase , filename=__UpperCamelCase , local_dir=__UpperCamelCase )
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
if model_type == "text":
UpperCAmelCase_ = BarkSemanticModel
UpperCAmelCase_ = BarkSemanticConfig
UpperCAmelCase_ = BarkSemanticGenerationConfig
elif model_type == "coarse":
UpperCAmelCase_ = BarkCoarseModel
UpperCAmelCase_ = BarkCoarseConfig
UpperCAmelCase_ = BarkCoarseGenerationConfig
elif model_type == "fine":
UpperCAmelCase_ = BarkFineModel
UpperCAmelCase_ = BarkFineConfig
UpperCAmelCase_ = BarkFineGenerationConfig
else:
raise NotImplementedError()
UpperCAmelCase_ = f'{model_type}_small' if use_small else model_type
UpperCAmelCase_ = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(__UpperCamelCase ):
logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
_download(model_info['''repo_id'''] , model_info['''file_name'''] )
UpperCAmelCase_ = torch.load(__UpperCamelCase , map_location=__UpperCamelCase )
# this is a hack
UpperCAmelCase_ = checkpoint['''model_args''']
if "input_vocab_size" not in model_args:
UpperCAmelCase_ = model_args['''vocab_size''']
UpperCAmelCase_ = model_args['''vocab_size''']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
UpperCAmelCase_ = model_args.pop('''n_head''' )
UpperCAmelCase_ = model_args.pop('''n_embd''' )
UpperCAmelCase_ = model_args.pop('''n_layer''' )
UpperCAmelCase_ = ConfigClass(**checkpoint['''model_args'''] )
UpperCAmelCase_ = ModelClass(config=__UpperCamelCase )
UpperCAmelCase_ = GenerationConfigClass()
UpperCAmelCase_ = model_generation_config
UpperCAmelCase_ = checkpoint['''model''']
# fixup checkpoint
UpperCAmelCase_ = '''_orig_mod.'''
for k, v in list(state_dict.items() ):
if k.startswith(__UpperCamelCase ):
# replace part of the key with corresponding layer name in HF implementation
UpperCAmelCase_ = k[len(__UpperCamelCase ) :]
for old_layer_name in new_layer_name_dict:
UpperCAmelCase_ = new_k.replace(__UpperCamelCase , new_layer_name_dict[old_layer_name] )
UpperCAmelCase_ = state_dict.pop(__UpperCamelCase )
UpperCAmelCase_ = set(state_dict.keys() ) - set(model.state_dict().keys() )
UpperCAmelCase_ = {k for k in extra_keys if not k.endswith('''.attn.bias''' )}
UpperCAmelCase_ = set(model.state_dict().keys() ) - set(state_dict.keys() )
UpperCAmelCase_ = {k for k in missing_keys if not k.endswith('''.attn.bias''' )}
if len(__UpperCamelCase ) != 0:
raise ValueError(f'extra keys found: {extra_keys}' )
if len(__UpperCamelCase ) != 0:
raise ValueError(f'missing keys: {missing_keys}' )
model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
UpperCAmelCase_ = model.num_parameters(exclude_embeddings=__UpperCamelCase )
UpperCAmelCase_ = checkpoint['''best_val_loss'''].item()
logger.info(f'model loaded: {round(n_params/1e6 , 1 )}M params, {round(__UpperCamelCase , 3 )} loss' )
model.eval()
model.to(__UpperCamelCase )
del checkpoint, state_dict
return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
UpperCAmelCase_ = '''cpu''' # do conversion on cpu
UpperCAmelCase_ = _get_ckpt_path(__UpperCamelCase , use_small=__UpperCamelCase )
UpperCAmelCase_ = _load_model(__UpperCamelCase , __UpperCamelCase , model_type=__UpperCamelCase , use_small=__UpperCamelCase )
# load bark initial model
UpperCAmelCase_ = _bark_load_model(__UpperCamelCase , '''cpu''' , model_type=__UpperCamelCase , use_small=__UpperCamelCase )
if model_type == "text":
UpperCAmelCase_ = bark_model['''model''']
if model.num_parameters(exclude_embeddings=__UpperCamelCase ) != bark_model.get_num_params():
raise ValueError('''initial and new models don\'t have the same number of parameters''' )
# check if same output as the bark model
UpperCAmelCase_ = 5
UpperCAmelCase_ = 10
if model_type in ["text", "coarse"]:
UpperCAmelCase_ = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
UpperCAmelCase_ = bark_model(__UpperCamelCase )[0]
UpperCAmelCase_ = model(__UpperCamelCase )
# take last logits
UpperCAmelCase_ = output_new_model_total.logits[:, [-1], :]
else:
UpperCAmelCase_ = 3
UpperCAmelCase_ = 8
UpperCAmelCase_ = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
UpperCAmelCase_ = model(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase_ = bark_model(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase_ = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('''initial and new outputs don\'t have the same shape''' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('''initial and new outputs are not equal''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
UpperCAmelCase_ = os.path.join(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase_ = BarkSemanticConfig.from_pretrained(os.path.join(__UpperCamelCase , '''config.json''' ) )
UpperCAmelCase_ = BarkCoarseConfig.from_pretrained(os.path.join(__UpperCamelCase , '''config.json''' ) )
UpperCAmelCase_ = BarkFineConfig.from_pretrained(os.path.join(__UpperCamelCase , '''config.json''' ) )
UpperCAmelCase_ = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' )
UpperCAmelCase_ = BarkSemanticModel.from_pretrained(__UpperCamelCase )
UpperCAmelCase_ = BarkCoarseModel.from_pretrained(__UpperCamelCase )
UpperCAmelCase_ = BarkFineModel.from_pretrained(__UpperCamelCase )
UpperCAmelCase_ = EncodecModel.from_pretrained('''facebook/encodec_24khz''' )
UpperCAmelCase_ = BarkConfig.from_sub_model_configs(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase_ = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
UpperCAmelCase_ = BarkModel(__UpperCamelCase )
UpperCAmelCase_ = semantic
UpperCAmelCase_ = coarseAcoustic
UpperCAmelCase_ = fineAcoustic
UpperCAmelCase_ = codec
UpperCAmelCase_ = bark_generation_config
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
bark.save_pretrained(__UpperCamelCase , repo_id=__UpperCamelCase , push_to_hub=__UpperCamelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
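# The "fixup checkpoint" loop above reduces to a prefix strip plus substring
# renames over state-dict keys. The same idiom on a toy dict (the key is
# invented for illustration):
toy_state = {"_orig_mod.transformer.h.0.attn.c_attn.weight": 1}
toy_renames = {"c_attn": "att_proj", "transformer.": "", "h.": "layers."}
fixed = {}
for key, value in toy_state.items():
    if key.startswith("_orig_mod."):
        key = key[len("_orig_mod."):]
    for old, new in toy_renames.items():
        key = key.replace(old, new)
    fixed[key] = value
assert fixed == {"layers.0.attn.att_proj.weight": 1}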
| 350 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_conditional_detr"] = ['ConditionalDetrFeatureExtractor']
_import_structure["image_processing_conditional_detr"] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_conditional_detr"] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 177 | 0 |
def __A ( __lowerCAmelCase = 100 )-> int:
"""simple docstring"""
_UpperCAmelCase = (n * (n + 1) // 2) ** 2
_UpperCAmelCase = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
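# Brute-force cross-check of the closed forms used in solution(): both sums
# run over 1..n, and the closed-form answer must match the direct computation.
for n in range(1, 50):
    brute = sum(range(1, n + 1)) ** 2 - sum(k * k for k in range(1, n + 1))
    assert solution(n) == brute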
| 39 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n) -> float:
    """Greedy fractional knapsack: take items in order of value density until capacity w is filled."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
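# Example run of frac_knapsack above: items are taken in order of value
# density (6.0, 5.0, 4.0 here); the first two fit whole and 20/30 of the
# third fills the remaining capacity, giving 60 + 100 + 80 = 240.
values = [60, 100, 120]
weights = [10, 20, 30]
assert frac_knapsack(values, weights, 50, len(values)) == 240.0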
| 73 | 0 |
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int:
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( ) -> str:
lowercase__: Any = '''mock-s3-bucket'''
lowercase__: Optional[int] = F"""s3://{mock_bucket}"""
lowercase__: Optional[Any] = extract_path_from_uri(__UpperCAmelCase )
assert dataset_path.startswith('''s3://''' ) is False
lowercase__: List[str] = '''./local/path'''
lowercase__: List[str] = extract_path_from_uri(__UpperCAmelCase )
assert dataset_path == new_dataset_path
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]:
lowercase__: Any = is_remote_filesystem(__UpperCAmelCase )
assert is_remote is True
lowercase__: Union[str, Any] = fsspec.filesystem('''file''' )
lowercase__: str = is_remote_filesystem(__UpperCAmelCase )
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file) -> None:
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path) -> None:
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file) -> None:
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
lowercase__: Optional[int] = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__UpperCAmelCase , __UpperCAmelCase , clobber=__UpperCAmelCase )
with pytest.warns(__UpperCAmelCase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__UpperCAmelCase ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
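# Reading through a compression layer with fsspec, end to end: write a gzip
# file to a temp dir, then let fsspec wrap the stream in a decoder via the
# compression= argument. Self-contained sketch.
import gzip
import tempfile

_path = os.path.join(tempfile.mkdtemp(), "dataset.txt.gz")
with gzip.open(_path, "wt") as _f:
    _f.write("hello")
with fsspec.open(_path, "rt", compression="gzip") as _f:
    assert _f.read() == "hello"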
| 2 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__A = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    """Deprecated alias kept for backward compatibility; use VideoMAEImageProcessor instead."""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
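# The shim above is the standard deprecation pattern: subclass the
# replacement, warn once at construction, delegate everything. A generic
# sketch with placeholder names:
import warnings

class NewProcessor:
    def __init__(self, scale=1.0):
        self.scale = scale

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)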
| 2 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''ctrl'''
SCREAMING_SNAKE_CASE__ = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        """Initialize a CTRL configuration; the defaults reproduce the original ctrl architecture."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
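# The attribute_map above lets generic code ask for hidden_size while the
# config stores n_embd. A minimal version of that aliasing via __getattr__
# (simplified relative to the real PretrainedConfig behaviour):
class AliasedConfig:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd=1280):
        self.n_embd = n_embd

    def __getattr__(self, name):
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

assert AliasedConfig().hidden_size == 1280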
| 323 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(lowercase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """Route call kwargs to the preprocess and forward stages."""
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        """Load the image and build model inputs, handling model-specific prompt formats."""
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True, )
            }
            records.append(record)
        return records
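# The class above follows the Pipeline contract: _sanitize_parameters routes
# kwargs to the right stage, preprocess builds inputs, _forward generates,
# postprocess decodes. The control flow in miniature (no models involved;
# everything here is a stand-in):
class ToyPipeline:
    def __call__(self, inputs, **kwargs):
        pre_params, fwd_params, post_params = self._sanitize_parameters(**kwargs)
        model_inputs = self.preprocess(inputs, **pre_params)
        model_outputs = self._forward(model_inputs, **fwd_params)
        return self.postprocess(model_outputs, **post_params)

    def _sanitize_parameters(self, shout=False):
        return {}, {}, {"shout": shout}

    def preprocess(self, inputs):
        return {"text": str(inputs)}

    def _forward(self, model_inputs):
        return model_inputs["text"][::-1]

    def postprocess(self, model_outputs, shout=False):
        return model_outputs.upper() if shout else model_outputs

assert ToyPipeline()("abc", shout=True) == "CBA"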
| 323 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a_ = config["""lr"""]
a_ = int(config["num_epochs"] )
a_ = int(config["seed"] )
a_ = int(config["batch_size"] )
a_ = args.model_name_or_path
set_seed(UpperCAmelCase__ )
a_ = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a_ = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ , return_dict=UpperCAmelCase__ )
# Instantiate optimizer
a_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
a_ = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase__ )
if accelerator.state.deepspeed_plugin is not None:
a_ = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
a_ = 1
a_ = (len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
a_ = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase__ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase__ , )
else:
a_ = DummyScheduler(UpperCAmelCase__ , total_num_steps=UpperCAmelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a_ = accelerator.prepare(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# We need to keep track of how many total steps we have iterated over
a_ = 0
# We also need to keep track of the stating epoch so files are named properly
a_ = 0
# Now we train the model
a_ = evaluate.load("glue" , "mrpc" )
a_ = 0
a_ = {}
for epoch in range(UpperCAmelCase__ , UpperCAmelCase__ ):
model.train()
for step, batch in enumerate(UpperCAmelCase__ ):
a_ = model(**UpperCAmelCase__ )
a_ = outputs.loss
a_ = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
a_ = 0
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
a_ = model(**UpperCAmelCase__ )
a_ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
a_ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase__ ) - 1:
a_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
a_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , )
a_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase__ )
a_ = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
a_ = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
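# The accumulation logic from the training loop above, isolated: losses from
# several micro-batches are scaled and backpropagated before a single
# optimizer step. Toy model and data; no accelerate machinery involved.
import torch
from torch import nn

_model = nn.Linear(4, 1)
_optimizer = torch.optim.SGD(_model.parameters(), lr=0.1)
_accumulation_steps = 4
for _step in range(16):
    _batch = torch.randn(8, 4)
    _loss = _model(_batch).pow(2).mean() / _accumulation_steps
    _loss.backward()  # grads from each micro-batch add up in param.grad
    if (_step + 1) % _accumulation_steps == 0:
        _optimizer.step()
        _optimizer.zero_grad()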
def UpperCamelCase ( ) ->int:
"""simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
parser.add_argument(
"--model_name_or_path" , type=UpperCAmelCase__ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=UpperCAmelCase__ , )
parser.add_argument(
"--output_dir" , type=UpperCAmelCase__ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=UpperCAmelCase__ , default=3 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
| 371 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303 | 0 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def UpperCamelCase_ ( _UpperCAmelCase : int ) -> dict:
"""simple docstring"""
_UpperCAmelCase : Tuple = script.contents[0]
_UpperCAmelCase : Optional[int] = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Scrape an Instagram user's public profile fields from the page's embedded JSON."""
    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()
    def get_json(self) -> dict:
        """Fetch the profile page and return the embedded user dict."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"
    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]
    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]
    @property
    def biography(self) -> str:
        return self.user_data["biography"]
    @property
    def email(self) -> str:
        return self.user_data["business_email"]
    @property
    def website(self) -> str:
        return self.user_data["external_url"]
    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]
    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """simple docstring"""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 31 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to give XLNet and Transfo-XL more state on short prompts (see __init__ below).
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation
        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text
        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
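# Hedged usage sketch: with the names restored above, this pipeline is normally
# exercised through the high-level `pipeline` factory; "gpt2" is only an example id.
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])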
| 80 | 0 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
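# Hedged sketch of how a test can swap the attributes this module exposes; this is the
# kind of substitution datasets' patch_submodule() performs. Stdlib-only; the imported
# module name below is an assumption for illustration.
#
#   from unittest import mock
#   import _test_patching  # assumed module name for the file above
#
#   with mock.patch.object(_test_patching, "join", lambda *parts: "/".join(parts)):
#       assert _test_patching.join("a", "b") == "a/b"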
| 56 |
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
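# Hedged sketch of the `sqlite_path` fixture these tests assume: a SQLite file with a
# "dataset" table of col_1/col_2/col_3. Values below are illustrative only.
#
#   import sqlite3
#   con = sqlite3.connect(str(tmp_path / "dataset.sqlite"))
#   con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
#   con.executemany("INSERT INTO dataset VALUES (?, ?, ?)", [("0", 0, 0.0), ("1", 1, 1.0)])
#   con.commit()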
| 56 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    """simple docstring"""

    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    """simple docstring"""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def left_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    """simple docstring"""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)
    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()
if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
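    # Hedged sanity check (illustrative addition, not in the original demo): an AVL
    # tree keeps its height logarithmic; with this file's leaf-height-of-1 convention,
    # 10 keys can never produce a balanced tree taller than 4.
    t2 = AVLtree()
    for i in range(10):
        t2.insert(i)
    assert t2.get_height() <= 4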
| 208 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
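# Hedged usage sketch: the ONNX config exposes the dynamic axes used at export time.
#
#   config = XLMRobertaConfig()
#   onnx_config = XLMRobertaOnnxConfig(config, task="default")
#   print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes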
| 208 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
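# Hedged usage sketch: projection_dim > 0 adds a projection on top of the encoder pooler.
#
#   config = DPRConfig(projection_dim=128)
#   assert config.projection_dim == 128 and config.model_type == "dpr"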
| 368 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask
            )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044_715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
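# Hedged numeric sketch of the FiLM modulation above (plain torch, no custom classes):
# the conditioning embedding is projected to a scale and a shift, then applied as
# x * (1 + scale) + shift, broadcasting over the sequence dimension. Shapes are illustrative.
#
#   x = torch.randn(2, 16, 8)        # (batch, seq, d_model)
#   cond = torch.randn(2, 1, 32)     # (batch, 1, d_model * 4)
#   proj = nn.Linear(32, 8 * 2, bias=False)
#   scale, shift = torch.chunk(proj(cond), 2, dim=-1)
#   y = x * (1 + scale) + shift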
| 128 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 46 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
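# Hedged usage sketch: a template like this is attached to a dataset so that
# `dataset.prepare_for_task("language-modeling")` can rename columns via column_mapping.
#
#   template = LanguageModeling(text_column="content")
#   assert template.column_mapping == {"content": "text"}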
| 46 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__( self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ):
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
        self.text_config = Pix2StructTextConfig(**text_config )
        self.vision_config = Pix2StructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls , text_config: Pix2StructTextConfig , vision_config: Pix2StructVisionConfig , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )

    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
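
# Illustrative usage sketch (a hedged example, not part of the original file;
# it assumes the three config classes above compose the way composite
# transformers configs usually do, and the keyword values are arbitrary):
#
# text_config = Pix2StructTextConfig(num_layers=2, num_heads=4)
# vision_config = Pix2StructVisionConfig(num_hidden_layers=2)
# config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
# assert config.to_dict()["model_type"] == "pix2struct"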
| 363 |
'''simple docstring'''
import warnings
warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
| 114 | 0 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list[float] , nums2: list[float] ) -> float:
    '''simple docstring'''
    all_numbers = sorted(nums1 + nums2 )
    div, mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
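
# Worked example (values chosen for illustration): merging [1, 3] with [2]
# gives [1, 2, 3]; the length is odd (mod == 1), so the median is
# all_numbers[1] == 2. Merging [1, 2] with [3, 4] gives an even length, so
# the median is (all_numbers[2] + all_numbers[1]) / 2 == 2.5.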
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input('''Enter the elements of first array: ''').split()]
    array_2 = [float(x) for x in input('''Enter the elements of second array: ''').split()]
    print(F"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 22 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ["input_features", "is_longer"]

    def __init__( self , feature_size=64 , sampling_rate=48000 , hop_length=480 , max_length_s=10 , fft_window_size=1024 , padding_value=0.0 , return_attention_mask=False , frequency_min = 0 , frequency_max = 14000 , top_db = None , truncation = "fusion" , padding = "repeatpad" , **kwargs , ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale='''htk''' , )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )

    def to_dict(self ) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output['''feature_extractor_type'''] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self , waveform , mel_filters = None ) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel='''dB''' , )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self , mel , total_frames , chunk_frames ) -> np.array:
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
    def _get_input_mel(self , waveform , max_length , truncation , padding ) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self , raw_speech , truncation = None , padding = None , max_length = None , sampling_rate = None , return_tensors = None , **kwargs , ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float64 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float64 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0] , List ):
            input_mel = [np.asarray(feature , dtype=np.float64 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'''input_features''': input_mel, '''is_longer''': is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
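
# Illustrative usage sketch (hedged: assumes 48 kHz mono input and the default
# "fusion" truncation; the exact output shape is an assumption, not verified):
#
# feature_extractor = ClapFeatureExtractor()
# audio = np.zeros(48000)  # one second of silence
# inputs = feature_extractor(audio, sampling_rate=48000, return_tensors="np")
# inputs["input_features"]  # 4 stacked mel views per clip under "fusion"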
| 299 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args ):
    """simple docstring"""
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
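
# Worked example: parse_unknown_args(["--num_proc", "2", "--seed", "42"]) pairs
# every flag with the value that follows it -> {"num_proc": "2", "seed": "42"}.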
def main():
    """simple docstring"""
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
| 350 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/pegasus-xsum''': 512,
}
logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , sp_model_kwargs = None , **kwargs , ) ->None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    F"""additional_special_tokens should be of type {type(list )}, but is"""
                    F""" {type(additional_special_tokens )}""" )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , pad_token=pad_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                } )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self ) ->int:
        return len(self.sp_model ) + self.offset

    def get_vocab(self ) ->Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) ->Dict:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__( self , d ) ->None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize(self , text ) ->List[str]:
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id(self , token ) ->int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset

    def _convert_id_to_token(self , index ) ->str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
        return token

    def convert_tokens_to_string(self , tokens ) ->str:
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def num_special_tokens_to_add(self , pair=False ) ->int:
        return 1

    def _special_token_mask(self , seq ) ->List[int]:
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) ->List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ) ->List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self , save_directory , filename_prefix = None ) ->Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
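
# Illustrative usage sketch (hedged: assumes a SentencePiece model file on
# disk; ids 0 and 1 are reserved above for <pad> and </s>):
#
# tokenizer = PegasusTokenizer("spiece.model")
# tokenizer.build_inputs_with_special_tokens([42, 7])  # -> [42, 7, 1]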
| 19 | 0 |
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence ):
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"""The elements inside the sequence must contain only {colors} values"""
            raise ValueError(msg )
    return sequence
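
# Worked example (illustrative): dutch_national_flag_sort([2, 0, 1, 0, 2])
# returns [0, 0, 1, 2, 2] in a single pass; `low` and `high` track the
# boundaries of the red (0) and blue (2) regions while `mid` scans the middle.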
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(F"{dutch_national_flag_sort(unsorted)}") | 286 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
lowerCamelCase_ : List[str] = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
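
# Worked example: the word value of "SKY" is ord('S') + ord('K') + ord('Y')
# minus 64 each, i.e. 19 + 11 + 25 = 55, and 55 = 0.5 * 10 * 11 is the 10th
# triangular number, so "SKY" counts as a triangular word.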
def solution():
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_dir , 'words.txt' )
    words = ''
    with open(words_file_path ) as f:
        words = f.readline()
    words = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
print(solution()) | 286 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 1_2_8_0_2_2
FR_CODE = 1_2_8_0_2_8
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        vocab = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def A__ ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = """</s>"""
UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<s>""" )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("""Skip this test while all models are still to be uploaded.""" )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [2, 3, 4, 5, 6] , )
UpperCamelCase = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
UpperCamelCase = tokenizer.convert_tokens_to_string(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , """This is a test""" )
@slow
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = {"""input_ids""": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase ):
    checkpoint_name = """facebook/m2m100_418M"""
    src_text = [
        """In my opinion, there are two levels of response from the French government.""",
        """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
    ]
    tgt_text = [
        """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
        """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls ):
        """simple docstring"""
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en""" , tgt_lang="""fr""" )
        cls.pad_token_id = 1
        return cls
def A__ ( self ) -> str:
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id("""en""" ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) , 128063 )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.tokenizer.get_vocab()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["""<unk>"""] , 3 )
self.assertIn(self.tokenizer.get_lang_token("""en""" ) , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = """en"""
UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
self.assertIn(_SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
# fmt: off
UpperCamelCase = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
UpperCamelCase = self.tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = MaMaaaTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.lang_token_to_id , _SCREAMING_SNAKE_CASE )
@require_torch
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = """en"""
UpperCamelCase = """fr"""
UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
UpperCamelCase = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
UpperCamelCase = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = """mr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
UpperCamelCase = """zh"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = """mr"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
UpperCamelCase = """zh"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.tokenizer._build_translation_inputs("""A test""" , return_tensors="""pt""" , src_lang="""en""" , tgt_lang="""ar""" )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , {
# en_XX, A, test, EOS
"""input_ids""": [[128022, 58, 4183, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 128006,
} , )
| 183 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x ):  # picklable for multiprocessing
    return x.sum()
def add_one(i ):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class a_ ( lowerCamelCase ):
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 1
UpperCamelCase = [1, 2]
UpperCamelCase = {"""a""": 1, """b""": 2}
UpperCamelCase = {"""a""": [1, 2], """b""": [3, 4]}
UpperCamelCase = {"""a""": {"""1""": 1}, """b""": 2}
UpperCamelCase = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 2
UpperCamelCase = [2, 3]
UpperCamelCase = {"""a""": 2, """b""": 3}
UpperCamelCase = {"""a""": [2, 3], """b""": [4, 5]}
UpperCamelCase = {"""a""": {"""1""": 2}, """b""": 3}
UpperCamelCase = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase = 2
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
UpperCamelCase = {"""a""": 2, """b""": 0, """c""": 2}
UpperCamelCase = {
"""a""": np.eye(2 ).astype(_SCREAMING_SNAKE_CASE ),
"""b""": np.zeros(3 ).astype(_SCREAMING_SNAKE_CASE ),
"""c""": np.ones(2 ).astype(_SCREAMING_SNAKE_CASE ),
}
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , map_numpy=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_SCREAMING_SNAKE_CASE ): # can't pickle a local lambda
map_nested(lambda _SCREAMING_SNAKE_CASE : x + 1 , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = {"""a""": 1, """b""": 2}
UpperCamelCase = {"""a""": 3, """b""": 4}
UpperCamelCase = {"""a""": 5, """b""": 6}
UpperCamelCase = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[str]:
"""simple docstring"""
        class Foo:
            my_attr = """bar"""
        foo = Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(_SCREAMING_SNAKE_CASE , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length , num_proc , expected_num_proc ):
    with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
        """datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
        data = {F"{i}": i for i in range(iterable_length )}
        map_nested(lambda x : x + 10 , data , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class a_ ( lowerCamelCase ):
@require_tf
def A__ ( self ) -> Any:
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
UpperCamelCase = layers.Dense(2 )
def gen_random_output():
UpperCamelCase = tf.random.uniform((1, 3) )
return model(_SCREAMING_SNAKE_CASE ).numpy()
with temp_seed(42 , set_tensorflow=_SCREAMING_SNAKE_CASE ):
UpperCamelCase = gen_random_output()
with temp_seed(42 , set_tensorflow=_SCREAMING_SNAKE_CASE ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def A__ ( self ) -> int:
"""simple docstring"""
import torch
def gen_random_output():
UpperCamelCase = torch.nn.Linear(3 , 2 )
UpperCamelCase = torch.rand(1 , 3 )
return model(_SCREAMING_SNAKE_CASE ).detach().numpy()
with temp_seed(42 , set_pytorch=_SCREAMING_SNAKE_CASE ):
UpperCamelCase = gen_random_output()
with temp_seed(42 , set_pytorch=_SCREAMING_SNAKE_CASE ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def A__ ( self ) -> Dict:
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
UpperCamelCase = gen_random_output()
with temp_seed(42 ):
UpperCamelCase = gen_random_output()
UpperCamelCase = gen_random_output()
np.testing.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def test_nested_data_structure_data(input_data ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def test_flatten(data , expected_output ):
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1 , y="""foobar""" )
    expected_output = {"""x""": 1, """y""": """foobar"""}
    assert asdict(input ) == expected_output
    input = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
    expected_output = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y="""foo""" )] )
def _split_text(text ):
    return text.split()
def _aseconds_generator_of_aitems_with_timing(content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("""a""" ) == 2
        assert out.count("""b""" ) == 2
        assert len(out ) == 4
| 183 | 1 |
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool ):
    """simple docstring"""
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self ):
        """simple docstring"""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail""" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )

    def encode(self , text , labels ):
        """simple docstring"""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F'''This example is {label}''' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )

    def decode(self , outputs ):
        """simple docstring"""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
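
# Illustrative usage sketch (hedged: assumes the agents/tools runtime that
# PipelineTool belongs to is available and the checkpoint can be downloaded):
#
# classifier = TextClassificationTool()
# classifier("This is a very nice API!", labels=["positive", "negative"])
# # -> most likely "positive"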
| 170 |
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n ):
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors


@lru_cache
def upf_len(num ):
    '''simple docstring'''
    return len(unique_prime_factors(num ) )


def equality(iterable ):
    '''simple docstring'''
    return len(set(iterable ) ) in (0, 1)


def run(n ):
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run each element through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n = 4 ):
    '''simple docstring'''
    results = run(n )
    return results[0] if len(results ) else None
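
# Known small cases (quoted in the Project Euler 47 statement): 14 = 2 x 7 and
# 15 = 3 x 5 are the first two consecutive integers with two distinct prime
# factors each, so run(2) returns [14, 15]; 644, 645, 646 form the first run
# of three, so run(3) returns [644, 645, 646].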
if __name__ == "__main__":
print(solution())
| 311 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__( self , list_of_points ):
        '''simple docstring'''
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1

    def basis_function(self , t ) -> list[float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values

    def bezier_curve_function(self , t ) -> tuple[float, float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self , step_size = 0.01 ):
        '''simple docstring'''
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x_coordinates = [i[0] for i in self.list_of_points]
        y_coordinates = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="blue" , label="Curve of Degree " + str(self.degree ) , )
        plt.scatter(x_coordinates , y_coordinates , color="red" , label="Control Points" )
        plt.legend()
        plt.show()
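
# Sanity check (illustrative): for the degree-1 curve over [(1, 2), (3, 5)],
# basis_function(0.5) returns [0.5, 0.5] (summing to 1), so
# bezier_curve_function(0.5) yields the midpoint (2.0, 3.5).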
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 92 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Optional[int] ):
'''simple docstring'''
snake_case_ = int(snake_case )
assert noofclusters < len(snake_case )
# Find out the dimensionality
snake_case_ = len(vectors[0] )
# Will help select random centroids from among the available vectors
snake_case_ = list(range(len(snake_case ) ) )
shuffle(snake_case )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
snake_case_ = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
snake_case_ = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
snake_case_ = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(snake_case )
]
##These nodes will assign the centroid Variables the appropriate
##values
snake_case_ = tf.placeholder("float64" , [dim] )
snake_case_ = []
for centroid in centroids:
cent_assigns.append(tf.assign(snake_case , snake_case ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
snake_case_ = [tf.Variable(0 ) for i in range(len(snake_case ) )]
##These nodes will assign an assignment Variable the appropriate
##value
snake_case_ = tf.placeholder("int32" )
snake_case_ = []
for assignment in assignments:
cluster_assigns.append(tf.assign(snake_case , snake_case ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
snake_case_ = tf.placeholder("float" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
snake_case_ = tf.reduce_mean(snake_case , 0 )
##Node for computing Euclidean distances
# Placeholders for input
snake_case_ = tf.placeholder("float" , [dim] )
snake_case_ = tf.placeholder("float" , [dim] )
snake_case_ = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(snake_case , snake_case ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
snake_case_ = tf.placeholder("float" , [noofclusters] )
snake_case_ = tf.argmin(snake_case , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
snake_case_ = tf.initialize_all_variables()
# Initialize all variables
sess.run(snake_case )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
snake_case_ = 1_0_0
for _ in range(snake_case ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(snake_case ) ):
snake_case_ = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
snake_case_ = [
sess.run(snake_case , feed_dict={va: vect, va: sess.run(snake_case )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
snake_case_ = sess.run(
snake_case , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(snake_case ):
# Collect all the vectors assigned to this cluster
snake_case_ = [
vectors[i]
for i in range(len(snake_case ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
snake_case_ = sess.run(
snake_case , feed_dict={mean_input: array(snake_case )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
snake_case_ = sess.run(snake_case )
snake_case_ = sess.run(snake_case )
return centroids, assignments
| 92 | 1 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig ):
    '''simple docstring'''
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = """ml.p3.2xlarge"""
    iam_role_name = """accelerate_sagemaker_execution_role"""
    profile = """hf-sm"""
    region = """us-east-1"""
    num_machines = 1
    base_job_name = """accelerate-sagemaker-1"""
    pytorch_version = """1.6"""
    transformers_version = """4.4"""
    training_script = """train.py"""
    success_training_script_args = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
    fail_training_script_args = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
    def test_args_convert(self ):
        '''simple docstring'''
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args['''model_name_or_path'''] , str )
        assert isinstance(converted_args['''do_train'''] , bool )
        assert isinstance(converted_args['''epochs'''] , int )
        assert isinstance(converted_args['''learning_rate'''] , float )
        assert isinstance(converted_args['''max_steps'''] , float )
        with pytest.raises(ValueError ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 2 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token='''<unk>''' )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_input_output_texts(self , tokenizer ):
        '''simple docstring'''
        input_text = '''this is a test'''
        output_text = '''this is a test'''
        return input_text, output_text
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''<pad>'''
lowercase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(UpperCamelCase ) , 30001 )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''This is a test'''
lowercase__ = [13, 1, 4398, 25, 21, 1289]
lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , keep_accents=True )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# fmt: off
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode('''sequence builders''' )
        text_a = tokenizer.encode('''multi-sequence build''' )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , encoded_sentence )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , encoded_pair , )
@slow
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
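# For reference, the sentence-pair layout asserted above ([CLS] seq [SEP]
# seq_2 [SEP]) is what the public API produces; a quick usage sketch (the
# checkpoint name is the usual Microsoft one, adjust as needed):
from transformers import DebertaV2Tokenizer

deberta_tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
# Single sequence: [CLS] tokens [SEP]
single = deberta_tokenizer("sequence builders")["input_ids"]
# Sequence pair: [CLS] tokens [SEP] tokens_2 [SEP]
pair = deberta_tokenizer("sequence builders", "multi-sequence build")["input_ids"]
print(deberta_tokenizer.convert_ids_to_tokens(pair))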
| 2 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase ):
def lowerCAmelCase ( self : Any )-> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""" , return_value=response_mock ) as mock_head:
            _ = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
def lowerCAmelCase ( self : Dict )-> Tuple:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("""gpt2""" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""" , return_value=response_mock ) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("""gpt2""" )
            # This check we did call the fake head request
            mock_head.assert_called()
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
try:
snake_case = tempfile.mktemp()
with open(__snake_case , """wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , __snake_case )
snake_case = AlbertTokenizer.from_pretrained(__snake_case )
finally:
os.remove(__snake_case )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" , """wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , __snake_case )
snake_case = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 10_00 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def lowerCAmelCase ( self : Any )-> Dict:
# This test is for deprecated behavior and can be removed in v5
snake_case = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase ):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def setUpClass(cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass(cls ):
try:
delete_repo(token=cls._token , repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def lowerCAmelCase ( self : int )-> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = BertTokenizer(__snake_case )
tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token )
snake_case = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__snake_case , repo_id="""test-tokenizer""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = BertTokenizer(__snake_case )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token )
snake_case = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__snake_case , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def lowerCAmelCase ( self : Optional[Any] )-> int:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
snake_case = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = BertTokenizerFast.from_pretrained(__snake_case )
bert_tokenizer.save_pretrained(__snake_case )
snake_case = CustomTokenizerFast.from_pretrained(__snake_case )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
snake_case = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" )
snake_case = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=__snake_case , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
class TrieTest(unittest.TestCase ):
def lowerCAmelCase ( self : Tuple )-> Dict:
snake_case = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def lowerCAmelCase ( self : str )-> Dict:
snake_case = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] )
def lowerCAmelCase ( self : List[Any] )-> List[str]:
snake_case = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] )
def lowerCAmelCase ( self : int )-> Optional[int]:
snake_case = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def lowerCAmelCase ( self : List[str] )-> List[Any]:
snake_case = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def lowerCAmelCase ( self : Union[str, Any] )-> Dict:
snake_case = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] )
def lowerCAmelCase ( self : Optional[int] )-> Optional[int]:
snake_case = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] )
def lowerCAmelCase ( self : Optional[int] )-> List[Any]:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
snake_case = Trie()
snake_case = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(__snake_case , ["""AB""", """C"""] )
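# The behaviour these tests fix (longest-match splitting on added words plus
# defensive handling of malformed offsets in cut_text) can be reproduced with
# a compact trie. This greedy sketch is simpler than the multi-state
# algorithm transformers actually ships, but it satisfies every assertion above:
class SimpleTrie:
    """Greedy leftmost-longest-match trie; the "" key marks the end of a word."""

    def __init__(self):
        self.data = {}

    def add(self, word: str):
        if not word:
            return
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1

    def split(self, text: str):
        # Collect cut offsets: every match contributes its [start, end) pair.
        offsets = [0]
        i = 0
        while i < len(text):
            node, j, last_end = self.data, i, None
            while j < len(text) and text[j] in node:
                node = node[text[j]]
                j += 1
                if "" in node:
                    last_end = j  # longest match seen so far from position i
            if last_end is None:
                i += 1
            else:
                offsets.extend([i, last_end])
                i = last_end
        return self.cut_text(text, offsets)

    def cut_text(self, text: str, offsets: list):
        offsets.append(len(text))
        tokens, start = [], 0
        for end in offsets:
            if end > start:  # skip empty and malformed (end < start) cuts
                tokens.append(text[start:end])
            start = max(start, end)
        return tokens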
| 362 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase ):
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=response_mock ) as mock_head:
            _ = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
            # This check we did call the fake head request
            mock_head.assert_called()
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def lowerCAmelCase ( self : Union[str, Any] )-> str:
with self.assertRaises(__snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(__snake_case )
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase ):
    @classmethod
    def setUpClass(cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass(cls ):
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""test-image-processor""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : List[Any] )-> int:
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : str )-> Tuple:
CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
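# Outside the test harness, the round trip exercised above is just the
# standard hub API; the repo names below are placeholders:
from transformers import ViTImageProcessor

vit_image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
vit_image_processor.push_to_hub("my-user/my-image-processor")  # requires a valid auth token
reloaded = ViTImageProcessor.from_pretrained("my-user/my-image-processor")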
| 3 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
A_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
A_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A_ = CLIPTextModel(UpperCamelCase__ )
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Tuple:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ = self.get_dummy_components()
A_ = TextToVideoSDPipeline(**UpperCamelCase__ )
A_ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = self.get_dummy_inputs(UpperCamelCase__ )
A_ = """np"""
A_ = sd_pipe(**UpperCamelCase__ ).frames
A_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A_ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCamelCase__ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCamelCase__ , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
pass
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests( unittest.TestCase ):
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A_ = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A_ = pipe.to("""cuda""" )
A_ = """Spiderman is surfing"""
A_ = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ = pipe(UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=25 , output_type="""pt""" ).frames
A_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A_ = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A_ = pipe.to("""cuda""" )
A_ = """Spiderman is surfing"""
A_ = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ = pipe(UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="""pt""" ).frames
A_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
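# The slow tests above boil down to this inference recipe (CUDA assumed,
# exactly as in the tests):
import torch
from diffusers import DPMSolverMultistepScheduler, TextToVideoSDPipeline

t2v_pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
t2v_pipe.scheduler = DPMSolverMultistepScheduler.from_config(t2v_pipe.scheduler.config)
t2v_pipe = t2v_pipe.to("cuda")

t2v_generator = torch.Generator(device="cpu").manual_seed(0)
video_frames = t2v_pipe(
    "Spiderman is surfing", generator=t2v_generator, num_inference_steps=25, output_type="pt"
).frames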
| 162 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ) -> Union[str, Any]:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_labels
A_ = num_choices
A_ = scope
A_ = self.vocab_size - 1
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
A_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = OpenAIGPTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , head_mask=UpperCamelCase__ )
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
A_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
A_ = OpenAIGPTLMHeadModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = OpenAIGPTDoubleHeadsModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.num_labels
A_ = OpenAIGPTForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict["""input_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""token_type_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""mc_token_ids"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict["""mc_labels"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp(self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCamelCase__ )
@slow
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = OpenAIGPTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_torch
class OPENAIGPTModelLanguageGenerationTest( unittest.TestCase ):
@slow
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(UpperCamelCase__ )
A_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=UpperCamelCase__ ) # the president is
A_ = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ )
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase__ )
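# The integration test above corresponds to this plain usage; with
# do_sample=False generation is greedy, so the output is deterministic:
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

gpt_tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
gpt_model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")

prompt_ids = gpt_tokenizer("the president is", return_tensors="pt").input_ids
generated = gpt_model.generate(prompt_ids, do_sample=False)  # greedy decoding
print(gpt_tokenizer.decode(generated[0]))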
| 162 | 1 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCAmelCase =" def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase ):
    def setUp(self ):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir ,"""models/bert/""" ) )
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path ,"""src/transformers/models/bert/modeling_bert.py""" ) ,os.path.join(self.transformer_dir ,"""models/bert/modeling_bert.py""" ) ,)
    def tearDown(self ):
        check_copies.TRANSFORMER_PATH = """src/transformers"""
        shutil.rmtree(self.transformer_dir )
    def check_copy_consistency(self ,comment ,class_name ,class_code ,overwrite_result=None ):
        code = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
        code_style = black.Mode(target_versions={black.TargetVersion.PY35} ,line_length=119 )
        code = black.format_str(code ,mode=code_style )
        fname = os.path.join(self.transformer_dir ,"""new_code.py""" )
        with open(fname ,"""w""" ,newline="""\n""" ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name ,overwrite=True )
            with open(fname ,"""r""" ) as f:
                self.assertTrue(f.read() ,expected )
    def test_find_code_in_transformers(self ):
        code = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
        self.assertEqual(code ,REFERENCE_CODE )
    def test_is_copy_consistent(self ):
        # Base copy consistency
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" ,"""BertLMPredictionHead""" ,REFERENCE_CODE + """\n""" ,)
        # With no empty line at the end
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" ,"""BertLMPredictionHead""" ,REFERENCE_CODE ,)
        # Copy consistency with rename
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" ,"""TestModelLMPredictionHead""" ,re.sub("""Bert""" ,"""TestModel""" ,REFERENCE_CODE ) ,)
        # Copy consistency with a really long name
        long_class_name = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
        self.check_copy_consistency(
            f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' ,f'{long_class_name}LMPredictionHead' ,re.sub("""Bert""" ,long_class_name ,REFERENCE_CODE ) ,)
        # Copy consistency with overwrite
        self.check_copy_consistency(
            """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" ,"""TestModelLMPredictionHead""" ,REFERENCE_CODE ,overwrite_result=re.sub("""Bert""" ,"""TestModel""" ,REFERENCE_CODE ) ,)
    def test_convert_to_localized_md(self ):
        localized_readme = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list ,localized_md_list ,localized_readme["""format_model_list"""] )
        self.assertFalse(num_models_equal )
        self.assertEqual(converted_md_list ,converted_md_list_sample )
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list ,converted_md_list_sample ,localized_readme["""format_model_list"""] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal )
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list ,localized_md_list ,localized_readme["""format_model_list"""] )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list ,converted_md_list_sample )
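# The mechanism under test can be summarised in a few lines: take the
# reference code, apply any Old->New renames declared in a
# "# Copied from ... with Old->New" comment, and compare. A rough sketch,
# not check_copies' real implementation:
def is_copy_consistent_sketch(reference: str, copied: str, replacements: str = "") -> bool:
    expected = reference
    if replacements:
        for pattern in replacements.split(","):
            old, new = pattern.split("->")
            expected = expected.replace(old, new)
    return expected.strip() == copied.strip()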
| 362 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig ):
    model_type = """distilbert"""
    attribute_map = {
        """hidden_size""": """dim""",
        """num_attention_heads""": """n_heads""",
        """num_hidden_layers""": """n_layers""",
    }
    def __init__( self ,vocab_size=30522 ,max_position_embeddings=512 ,sinusoidal_pos_embds=False ,n_layers=6 ,n_heads=12 ,dim=768 ,hidden_dim=4 * 768 ,dropout=0.1 ,attention_dropout=0.1 ,activation="gelu" ,initializer_range=0.02 ,qa_dropout=0.1 ,seq_classif_dropout=0.2 ,pad_token_id=0 ,**kwargs ,):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs ,pad_token_id=pad_token_id )
class DistilBertOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
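# Instantiating the two classes above looks like this; the ONNX config
# exposes the dynamic-axis mapping used at export time (import path per
# recent transformers versions):
from transformers import DistilBertConfig
from transformers.models.distilbert.configuration_distilbert import DistilBertOnnxConfig

distilbert_config = DistilBertConfig()  # defaults: 6 layers, 12 heads, dim 768
onnx_config = DistilBertOnnxConfig(distilbert_config)
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])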
| 77 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
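# --- Illustration (added; not in the original file): a stripped-down analogue of
# --- the `_LazyModule` pattern wired up above, to show why `sys.modules[__name__]`
# --- is replaced. The name `LazyDemo` is made up for the demo.
import importlib
import types


class LazyDemo(types.ModuleType):
    """Defers importing a submodule until one of its exported names is touched."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # {submodule: [exported names]} -> reverse index from exported name to submodule
        self._name_to_module = {n: m for m, names in import_structure.items() for n in names}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import runs only once
        return value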
| 249 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class DeiTImageProcessor(BaseImageProcessor):
    # NOTE: the model-specific class name was lost in this copy; "DeiT" is an
    # assumption based on the defaults (256 resize, 224 crop, ImageNet-standard stats).
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
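# --- Usage sketch (added; not part of the original file): one PIL image through
# --- the processor above. The class name follows the reconstruction noted above.
#
#   import numpy as np
#   from PIL import Image
#
#   processor = DeiTImageProcessor()
#   image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
#   batch = processor.preprocess(image, return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224): resized to 256, center-cropped to 224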
| 327 | 0 |
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system A*x = b via Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the coefficient matrix and the target vector into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Fit a polynomial through (1, y_points[0]), (2, y_points[1]), ... and return it."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating polynomial from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimal polynomial fits."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
| 113 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a ="""true"""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=8_2 , lowerCamelCase__=1_6 ) -> List[Any]:
set_seed(4_2 )
__lowerCamelCase : Tuple = RegressionModel()
__lowerCamelCase : str = deepcopy(lowerCamelCase__ )
__lowerCamelCase : Optional[int] = RegressionDataset(length=lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = DataLoader(lowerCamelCase__ , batch_size=lowerCamelCase__ )
model.to(accelerator.device )
__lowerCamelCase , __lowerCamelCase : Tuple = accelerator.prepare(lowerCamelCase__ , lowerCamelCase__ )
return model, ddp_model, dataloader
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=False ) -> List[Any]:
__lowerCamelCase : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
__lowerCamelCase : Any = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(lowerCamelCase__ ):
__lowerCamelCase : Union[str, Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
with accelerator.main_process_first():
__lowerCamelCase : Union[str, Any] = dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
__lowerCamelCase : Tuple = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCamelCase__ ):
if use_longest:
return tokenizer.pad(lowerCamelCase__ , padding='longest' , return_tensors='pt' )
return tokenizer.pad(lowerCamelCase__ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return DataLoader(lowerCamelCase__ , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=1_6 )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
__lowerCamelCase : Optional[int] = Accelerator(dispatch_batches=lowerCamelCase__ , split_batches=lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = get_dataloader(lowerCamelCase__ , not dispatch_batches )
__lowerCamelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : Tuple = accelerator.prepare(lowerCamelCase__ , lowerCamelCase__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
__lowerCamelCase : str = []
for batch in dataloader:
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = batch.values()
with torch.no_grad():
__lowerCamelCase : Tuple = model(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__lowerCamelCase , __lowerCamelCase : Dict = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCamelCase__ )
targs.append(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = torch.cat(lowerCamelCase__ ), torch.cat(lowerCamelCase__ )
return logits, targs
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=8_2 , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=1_6 ) -> Dict:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = get_basic_setup(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : Dict = generate_predictions(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert (
len(lowerCamelCase__ ) == num_samples
), F"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCamelCase__ )}"
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = False , lowerCamelCase__ = False ) -> Dict:
__lowerCamelCase : Dict = evaluate.load('glue' , 'mrpc' )
__lowerCamelCase , __lowerCamelCase : Optional[int] = get_mrpc_setup(lowerCamelCase__ , lowerCamelCase__ )
# First do baseline
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = setup['no']
model.to(lowerCamelCase__ )
model.eval()
for batch in dataloader:
batch.to(lowerCamelCase__ )
with torch.inference_mode():
__lowerCamelCase : Dict = model(**lowerCamelCase__ )
__lowerCamelCase : Any = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowerCamelCase__ , references=batch['labels'] )
__lowerCamelCase : str = metric.compute()
# Then do distributed
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Union[str, Any] = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__lowerCamelCase : List[str] = model(**lowerCamelCase__ )
__lowerCamelCase : List[Any] = outputs.logits.argmax(dim=-1 )
__lowerCamelCase : List[str] = batch['labels']
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowerCamelCase__ , references=lowerCamelCase__ )
__lowerCamelCase : Dict = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
__lowerCamelCase : int = Accelerator(split_batches=lowerCamelCase__ , dispatch_batches=lowerCamelCase__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
test_mrpc(lowerCamelCase__ , lowerCamelCase__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__lowerCamelCase : Optional[Any] = Accelerator(split_batches=lowerCamelCase__ , dispatch_batches=lowerCamelCase__ )
if accelerator.is_local_main_process:
print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
test_torch_metrics(lowerCamelCase__ , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
__lowerCamelCase : Dict = Accelerator()
test_torch_metrics(lowerCamelCase__ , 5_1_2 )
accelerator.state._reset_state()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
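# --- Note (added for illustration): the behavior exercised above is that
# --- `gather_for_metrics` all-gathers across processes *and* strips the samples
# --- duplicated to fill the last uneven batch, so the gathered count equals the
# --- true dataset length. A minimal sketch, assuming `accelerate` is installed:
#
#   from accelerate import Accelerator
#   import torch
#
#   accelerator = Accelerator()
#   preds = torch.tensor([0, 1, 1])
#   labels = torch.tensor([0, 1, 0])
#   preds, labels = accelerator.gather_for_metrics((preds, labels))
#   # on 1 process this is a no-op; on N processes it concatenates and de-duplicates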
| 113 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 158 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# NOTE: the original class names were obfuscated in this copy; the two names below
# are an assumption based on transformers' dummy speech objects.
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
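# --- Illustration (added): what these dummy classes are for. When the speech
# --- backend is missing, importing the name still succeeds, but any instantiation
# --- raises and points the user at the missing dependency. Class names follow
# --- the reconstruction above (an assumption).
#
#   from transformers.utils.dummy_speech_objects import ASTFeatureExtractor
#   ASTFeatureExtractor()  # raises ImportError mentioning the "speech" backend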
| 106 | 0 |
"""Convert a T5X checkpoint into a PyTorch checkpoint."""
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


# NOTE: the target state-dict key names below were lost in this copy and are
# reconstructed from T5ForConditionalGeneration's parameter naming.
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict with torch tensors for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
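# --- Usage sketch (added; not in the original script): calling the converter
# --- from Python instead of the CLI. All paths below are placeholders.
#
#   convert_t5x_checkpoint_to_pytorch(
#       t5x_checkpoint_path="/path/to/t5x/checkpoint_dir",  # placeholder
#       config_file="/path/to/t5_config.json",              # placeholder
#       pytorch_dump_path="/tmp/t5-converted",              # placeholder
#       is_encoder_only=False,
#   )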
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 1 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_glue.py\n            --model_name_or_path distilbert-base-uncased\n            --output_dir {tmp_dir}\n            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --learning_rate=1e-4\n            --eval_steps=2\n            --warmup_steps=2\n            --seed=42\n            --max_seq_length=128\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_clm_flax.py\n            --model_name_or_path distilgpt2\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --do_train\n            --do_eval\n            --block_size 128\n            --per_device_train_batch_size 4\n            --per_device_eval_batch_size 4\n            --num_train_epochs 2\n            --logging_steps 2 --eval_steps 2\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_summarization.py\n            --model_name_or_path t5-small\n            --train_file tests/fixtures/tests_samples/xsum/sample.json\n            --validation_file tests/fixtures/tests_samples/xsum/sample.json\n            --test_file tests/fixtures/tests_samples/xsum/sample.json\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            --num_train_epochs=3\n            --warmup_steps=8\n            --do_train\n            --do_eval\n            --do_predict\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --predict_with_generate\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_mlm.py\n            --model_name_or_path distilroberta-base\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            --max_seq_length 128\n            --per_device_train_batch_size 4\n            --per_device_eval_batch_size 4\n            --logging_steps 2 --eval_steps 2\n            --do_train\n            --do_eval\n            --num_train_epochs=1\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_t5_mlm_flax.py\n            --model_name_or_path t5-small\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --do_train\n            --do_eval\n            --max_seq_length 128\n            --per_device_train_batch_size 4\n            --per_device_eval_batch_size 4\n            --num_train_epochs 2\n            --logging_steps 2 --eval_steps 2\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data, distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_flax_ner.py\n            --model_name_or_path bert-base-uncased\n            --train_file tests/fixtures/tests_samples/conll/sample.json\n            --validation_file tests/fixtures/tests_samples/conll/sample.json\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            --do_train\n            --do_eval\n            --warmup_steps=2\n            --learning_rate=2e-4\n            --logging_steps 2 --eval_steps 2\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=2\n            --num_train_epochs={epochs}\n            --seed 7\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_qa.py\n            --model_name_or_path bert-base-uncased\n            --version_2_with_negative\n            --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            --num_train_epochs=3\n            --warmup_steps=2\n            --do_train\n            --do_eval\n            --logging_steps 2 --eval_steps 2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 253 |
def upper(word: str) -> str:
    """Convert the entire string to uppercase letters."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
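# --- Examples (added), doctest style:
#
# >>> upper("wow")
# 'WOW'
# >>> upper("Hello World")
# 'HELLO WORLD'
# >>> upper("123")
# '123'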
| 253 | 1 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
def _lowerCamelCase ( self ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self ):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ = True
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = GPTNeoXModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
UpperCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = True
UpperCamelCase__ = GPTNeoXModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = GPTNeoXForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = GPTNeoXForQuestionAnswering(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = GPTNeoXForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = GPTNeoXForTokenClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = True
UpperCamelCase__ = GPTNeoXForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# first forward pass
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase )
UpperCamelCase__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
UpperCamelCase__ = output_from_no_past["""hidden_states"""][0]
UpperCamelCase__ = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ = None
self.model_tester.create_and_check_model_as_decoder(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _lowerCamelCase ( self ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = ids_tensor([1, 10] , config.vocab_size )
UpperCamelCase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ = GPTNeoXModel(__lowerCAmelCase )
original_model.to(__lowerCAmelCase )
original_model.eval()
UpperCamelCase__ = original_model(__lowerCAmelCase ).last_hidden_state
UpperCamelCase__ = original_model(__lowerCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ = {"""type""": scaling_type, """factor""": 10.0}
UpperCamelCase__ = GPTNeoXModel(__lowerCAmelCase )
scaled_model.to(__lowerCAmelCase )
scaled_model.eval()
UpperCamelCase__ = scaled_model(__lowerCAmelCase ).last_hidden_state
UpperCamelCase__ = scaled_model(__lowerCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
@slow
def _lowerCamelCase ( self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
UpperCamelCase__ = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__lowerCAmelCase )
UpperCamelCase__ = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__lowerCAmelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCamelCase__ = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
UpperCamelCase__ = model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=20 )
UpperCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
| 87 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 87 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
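# --- Usage sketch (added): how the derived backbone fields above come out with
# --- the default hyper-parameters.
#
#   config = MaskFormerSwinConfig()                 # embed_dim=96, depths=[2, 2, 6, 2]
#   assert config.hidden_size == 96 * 2 ** 3        # 768: channels after the last stage
#   assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]
#   MaskFormerSwinConfig(out_features=["stage1", "stage4"]).out_features  # validated subset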
| 333 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1_000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        # NOTE: the boolean values for upcast_attention/use_linear_projection were lost
        # in this copy; True is an assumption consistent with SD 2.1-based pipelines.
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
def lowerCAmelCase_ (self , lowercase__ , lowercase__=0 ) -> List[Any]:
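# Seed the generator (plain manual_seed on MPS, a device generator elsewhere) so the dummy inputs are deterministic.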
if str(lowercase__ ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(lowercase__ )
else:
__UpperCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=lowercase__ )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowercase__ )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe('''anime turtle''' , generator=lowercase__ , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 | 1 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowercase_ = logging.getLogger(__name__)
lowercase_ = "pytorch_model.bin"
@dataclasses.dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
__UpperCAmelCase : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'A csv or a json file containing the validation data.'} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The name of the task to train on.'} , )
__UpperCAmelCase : Optional[List[str]] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
__UpperCAmelCase : Optional[str] = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
} , )
__UpperCAmelCase : Optional[int] = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
__UpperCAmelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
__UpperCAmelCase : Optional[bool] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
__UpperCAmelCase : Optional[bool] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
__UpperCAmelCase : Optional[bool] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
__UpperCAmelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
__UpperCAmelCase : Optional[int] = dataclasses.field(
default=1_0_0 , metadata={'help': 'Maximum number of self-training iterations.'} , )
__UpperCAmelCase : Optional[int] = dataclasses.field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Random seed for initialization.'} , )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
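# Merge the inference inputs with the model's predictions, optionally filter
# pseudo-labels by confidence or validation performance, and write the
# resulting pseudo-labeled training file for the next self-training iteration.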
__a = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
__a = dataset.filter(lambda lowerCAmelCase__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__a = int(eval_result * len(lowerCAmelCase__ ) )
print(lowerCAmelCase__ )
__a = dataset.sort('''probability''' , reverse=lowerCAmelCase__ )
__a = dataset.select(range(lowerCAmelCase__ ) )
__a = dataset.remove_columns(['''label''', '''probability'''] )
__a = dataset.rename_column('''prediction''' , '''label''' )
__a = dataset.map(lambda lowerCAmelCase__ : {"label": idalabel[example["label"]]} )
__a = dataset.shuffle(seed=args.seed )
__a = os.path.join(lowerCAmelCase__ , f'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(lowerCAmelCase__ , index=lowerCAmelCase__ )
else:
dataset.to_json(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : List[Any] ) -> str:
__a = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__a = STModelArguments(model_name_or_path=lowerCAmelCase__ )
__a = STDataArguments(train_file=lowerCAmelCase__ , infer_file=lowerCAmelCase__ )
__a = STTrainingArguments(output_dir=lowerCAmelCase__ )
__a = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(lowerCAmelCase__ ).items():
setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for key, value in kwargs.items():
if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Sanity checks
__a = {}
__a = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__a = args.train_file
__a = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__a = args.eval_file
for key in data_files:
__a = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
__a = extension
else:
assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
__a = f'''{args.output_dir}/self-train_iter-{{}}'''.format
__a = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
accelerator.wait_for_everyone()
__a = None
__a = None
__a = 0
__a = False
# Show the progress bar
__a = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__a = data_dir_format(lowerCAmelCase__ )
assert os.path.exists(lowerCAmelCase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__a = os.path.join(lowerCAmelCase__ , '''stage-1''' )
__a = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
arguments_dict.update({key: value} )
__a = os.path.join(lowerCAmelCase__ , '''best-checkpoint''' , lowerCAmelCase__ )
if os.path.exists(lowerCAmelCase__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , lowerCAmelCase__ , lowerCAmelCase__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , lowerCAmelCase__ )
finetune(**lowerCAmelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(lowerCAmelCase__ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , lowerCAmelCase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__a = os.path.join(lowerCAmelCase__ , '''best-checkpoint''' )
__a = os.path.join(lowerCAmelCase__ , '''stage-2''' )
# Update arguments_dict
__a = model_path
__a = data_files['''train''']
__a = current_output_dir
__a = os.path.join(lowerCAmelCase__ , '''best-checkpoint''' , lowerCAmelCase__ )
if os.path.exists(lowerCAmelCase__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , lowerCAmelCase__ , lowerCAmelCase__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , lowerCAmelCase__ )
finetune(**lowerCAmelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(lowerCAmelCase__ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , lowerCAmelCase__ )
__a = iteration
__a = data_dir_format(iteration + 1 )
__a = AutoConfig.from_pretrained(os.path.join(lowerCAmelCase__ , '''best-checkpoint''' ) )
__a = config.idalabel
__a = os.path.join(lowerCAmelCase__ , '''eval_results_best-checkpoint.json''' )
__a = os.path.join(lowerCAmelCase__ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(lowerCAmelCase__ )
with open(lowerCAmelCase__ , '''r''' ) as f:
__a = float(json.load(lowerCAmelCase__ )[args.eval_metric] )
__a = os.path.join(lowerCAmelCase__ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(lowerCAmelCase__ )
# Loading the dataset from local csv or json files.
__a = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
__a = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
shutil.copy(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , f'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(lowerCAmelCase__ ):
shutil.copy(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , f'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.wait_for_everyone()
__a = os.path.join(lowerCAmelCase__ , f'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__a = eval_result
if best_iteration is None:
__a = new_iteration
__a = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__a = new_iteration
__a = new_eval_result
__a = 0
else:
if new_eval_result == best_eval_result:
__a = new_iteration
__a = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__a = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , lowerCAmelCase__ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , lowerCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCAmelCase__ , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(lowerCAmelCase__ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , lowerCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCAmelCase__ , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(lowerCAmelCase__ , '''eval_results_best-iteration.json''' ) , )
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'vit_mae'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=224 , _a=16 , _a=3 , _a=True , _a=16 , _a=512 , _a=8 , _a=2_048 , _a=0.75 , _a=False , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = decoder_num_attention_heads
__a = decoder_hidden_size
__a = decoder_num_hidden_layers
__a = decoder_intermediate_size
__a = mask_ratio
__a = norm_pix_loss
| 11 | 1 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__snake_case = logging.getLogger(__name__)
class __lowerCamelCase (_a ):
_lowercase = """sequence-classification"""
def __init__( self: Dict,A_: Tuple ):
'''simple docstring'''
if type(A_ ) == dict:
__UpperCamelCase = Namespace(**A_ )
__UpperCamelCase = glue_output_modes[hparams.task]
__UpperCamelCase = glue_tasks_num_labels[hparams.task]
super().__init__(A_,A_,self.mode )
def snake_case_ ( self: Optional[Any],**A_: int ):
'''simple docstring'''
return self.model(**A_ )
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: List[str] ):
'''simple docstring'''
__UpperCamelCase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__UpperCamelCase = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
__UpperCamelCase = self(**A_ )
__UpperCamelCase = outputs[0]
__UpperCamelCase = self.trainer.lr_schedulers[0]['scheduler']
__UpperCamelCase = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def snake_case_ ( self: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = self.hparams
__UpperCamelCase = processors[args.task]()
__UpperCamelCase = processor.get_labels()
for mode in ["train", "dev"]:
__UpperCamelCase = self._feature_file(A_ )
if os.path.exists(A_ ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s',A_ )
else:
logger.info('Creating features from dataset file at %s',args.data_dir )
__UpperCamelCase = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
__UpperCamelCase = convert_examples_to_features(
A_,self.tokenizer,max_length=args.max_seq_length,label_list=self.labels,output_mode=args.glue_output_mode,)
logger.info('Saving features into cached file %s',A_ )
torch.save(A_,A_ )
def snake_case_ ( self: int,A_: str,A_: int,A_: bool = False ):
'''simple docstring'''
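# Load cached features for the requested split and wrap them in a DataLoader.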
__UpperCamelCase = 'dev' if mode == 'test' else mode
__UpperCamelCase = self._feature_file(A_ )
logger.info('Loading features from cached file %s',A_ )
__UpperCamelCase = torch.load(A_ )
__UpperCamelCase = torch.tensor([f.input_ids for f in features],dtype=torch.long )
__UpperCamelCase = torch.tensor([f.attention_mask for f in features],dtype=torch.long )
__UpperCamelCase = torch.tensor([f.token_type_ids for f in features],dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__UpperCamelCase = torch.tensor([f.label for f in features],dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__UpperCamelCase = torch.tensor([f.label for f in features],dtype=torch.float )
return DataLoader(
TensorDataset(A_,A_,A_,A_ ),batch_size=A_,shuffle=A_,)
def snake_case_ ( self: Union[str, Any],A_: List[Any],A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__UpperCamelCase = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
__UpperCamelCase = self(**A_ )
__UpperCamelCase, __UpperCamelCase = outputs[:2]
__UpperCamelCase = logits.detach().cpu().numpy()
__UpperCamelCase = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def snake_case_ ( self: str,A_: List[Any] ):
'''simple docstring'''
__UpperCamelCase = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
__UpperCamelCase = np.concatenate([x['pred'] for x in outputs],axis=0 )
if self.hparams.glue_output_mode == "classification":
__UpperCamelCase = np.argmax(A_,axis=1 )
elif self.hparams.glue_output_mode == "regression":
__UpperCamelCase = np.squeeze(A_ )
__UpperCamelCase = np.concatenate([x['target'] for x in outputs],axis=0 )
__UpperCamelCase = [[] for _ in range(out_label_ids.shape[0] )]
__UpperCamelCase = [[] for _ in range(out_label_ids.shape[0] )]
__UpperCamelCase = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task,A_,A_ )}
__UpperCamelCase = dict(results.items() )
__UpperCamelCase = results
return ret, preds_list, out_label_list
def snake_case_ ( self: Dict,A_: list ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = self._eval_end(A_ )
__UpperCamelCase = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def snake_case_ ( self: Dict,A_: Dict ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = self._eval_end(A_ )
__UpperCamelCase = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def snake_case_ ( A_: Union[str, Any],A_: Any ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(A_,A_ )
parser.add_argument(
'--max_seq_length',default=128,type=A_,help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
),)
parser.add_argument(
'--task',default='',type=A_,required=A_,help='The GLUE task to run',)
parser.add_argument(
'--gpus',default=0,type=A_,help='The number of GPUs allocated for this; it defaults to 0, meaning none.',)
parser.add_argument(
'--overwrite_cache',action='store_true',help='Overwrite the cached training and evaluation sets' )
return parser
def _A ( ) -> int:
"""simple docstring"""
__UpperCamelCase = argparse.ArgumentParser()
add_generic_args(_lowercase , os.getcwd() )
__UpperCamelCase = GLUETransformer.add_model_specific_args(_lowercase , os.getcwd() )
__UpperCamelCase = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__UpperCamelCase = os.path.join(
'./results' , f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
__UpperCamelCase = GLUETransformer(_lowercase )
__UpperCamelCase = generic_train(_lowercase , _lowercase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=_lowercase ) )
__UpperCamelCase = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_lowercase )
if __name__ == "__main__":
main()
| 310 |
def _A ( _lowercase ) -> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(_lowercase , _lowercase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(_lowercase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
| 310 | 1 |
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : bool = False ) -> bool:
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
__lowerCamelCase = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
__lowerCamelCase = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(UpperCamelCase__ , 1 ):
if n < _p:
# then we have our last prime to check
__lowerCamelCase = primes[:idx]
break
__lowerCamelCase , __lowerCamelCase = n - 1, 0
# break up n - 1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
__lowerCamelCase = False
for r in range(UpperCamelCase__ ):
__lowerCamelCase = pow(UpperCamelCase__ , d * 2**r , UpperCamelCase__ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
__lowerCamelCase = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def lowerCamelCase_ ( ) -> None:
"""simple docstring"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 348 |
from __future__ import annotations
def lowerCamelCase_ ( UpperCamelCase__ : list[float] , UpperCamelCase__ : list[float] ) -> float:
"""simple docstring"""
__lowerCamelCase = sorted(numsa + numsa )
__lowerCamelCase , __lowerCamelCase = divmod(len(UpperCamelCase__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = [float(x) for x in input("Enter the elements of first array: ").split()]
__A = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 348 | 1 |